diff --git a/README.rst b/README.rst
index 73403f3..db9532a 100644
--- a/README.rst
+++ b/README.rst
@@ -17,6 +17,19 @@ Handles multi-API versions of Azure Storage Data Plane originally from https://g
 Change Log
 ----------
 
+1.0.0
+++++++
+* storageV1:
+    - Keep only v2018-11-09, v2017-11-09, v2017-04-17, v2015-04-05
+* blob:
+    - Keep only v2021-08-06, v2021-06-08, v2019-07-07
+* fileshare:
+    - Keep only v2021-06-08, v2019-07-07
+* filedatalake:
+    - Keep only v2021-08-06, v2019-07-07
+* queue:
+    - Keep only v2018-03-28, Add v2019-07-07
+
 0.10.0
 ++++++
 * blob:
diff --git a/azure/multiapi/storage/v2016_05_31/__init__.py b/azure/multiapi/storage/v2016_05_31/__init__.py
deleted file mode 100644
index 703bb7d..0000000
--- a/azure/multiapi/storage/v2016_05_31/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#--------------------------------------------------------------------------
-from ._constants import (
-    __author__,
-    __version__,
-    X_MS_VERSION,
-)
-
-from .models import (
-    RetentionPolicy,
-    Logging,
-    Metrics,
-    CorsRule,
-    ServiceProperties,
-    AccessPolicy,
-    ResourceTypes,
-    Services,
-    AccountPermissions,
-    Protocol,
-    ServiceStats,
-    GeoReplication,
-    LocationMode,
-    RetryContext,
-)
-
-from .retry import (
-    ExponentialRetry,
-    LinearRetry,
-    no_retry,
-)
-
-from .cloudstorageaccount import CloudStorageAccount
-from .sharedaccesssignature import (
-    SharedAccessSignature,
-)
diff --git a/azure/multiapi/storage/v2016_05_31/_auth.py b/azure/multiapi/storage/v2016_05_31/_auth.py
deleted file mode 100644
index 6d2f77c..0000000
--- a/azure/multiapi/storage/v2016_05_31/_auth.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#-------------------------------------------------------------------------
-# Copyright (c) Microsoft. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
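For context on the 1.0.0 entry above: a caller pins one of the retained API versions by importing its dated module. A minimal sketch, assuming the kept v2015_04_05 package exposes the same surface as the deleted v2016_05_31/__init__.py shown in this diff; the account name, key, and the factory helper are illustrative values, not taken from this patch:

    # Sketch only: pin a retained storageV1 API version after the 1.0.0 trim.
    from azure.multiapi.storage.v2015_04_05 import CloudStorageAccount

    # Placeholder credentials; CloudStorageAccount mirrors the export list of the
    # deleted sibling v2016_05_31/__init__.py, so the kept version is assumed to match.
    account = CloudStorageAccount(account_name="myaccount", account_key="<base64-key>")
    blob_service = account.create_block_blob_service()  # legacy-SDK style factory (assumed)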
-#-------------------------------------------------------------------------- -from ._common_conversion import ( - _sign_string, -) - - -class _StorageSharedKeyAuthentication(object): - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - - def _get_headers(self, request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - def _get_verb(self, request): - return request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = request.path.split('?')[0] - return '/' + self.account_name + uri_path - - def _get_canonicalized_headers(self, request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - signature = _sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.headers['Authorization'] = auth_string - - -class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication): - def sign_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - - def _get_canonicalized_resource_query(self, request): - sorted_queries = [(name, value) for name, value in request.query.items()] - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value: - string_to_sign += '\n' + name.lower() + ':' + value - - return string_to_sign - - -class _StorageTableSharedKeyAuthentication(_StorageSharedKeyAuthentication): - def sign_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - ['content-md5', 'content-type', 'x-ms-date'], - ) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - - def _get_canonicalized_resource_query(self, request): - for name, value in request.query.items(): - if name == 'comp': - return '?comp=' + value - return '' - - -class _StorageNoAuthentication(object): - def sign_request(self, request): - pass - - -class _StorageSASAuthentication(object): - def __init__(self, sas_token): - self.sas_token = sas_token - - def sign_request(self, request): - # if 'sig=' is present, then the request has already been signed - # as is the case when performing retries - if 'sig=' in request.path: - return - if '?' in request.path: - request.path += '&' - else: - request.path += '?' 
- - request.path += self.sas_token diff --git a/azure/multiapi/storage/v2016_05_31/_common_conversion.py b/azure/multiapi/storage/v2016_05_31/_common_conversion.py deleted file mode 100644 index 202c158..0000000 --- a/azure/multiapi/storage/v2016_05_31/_common_conversion.py +++ /dev/null @@ -1,126 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac -import sys -from dateutil.tz import tzutc -from io import (IOBase, SEEK_SET) - -from ._error import ( - _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, - _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM, -) -from .models import ( - _unicode_type, -) - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): - return value.encode('utf-8') - - return str(value) -else: - _str = str - -def _to_str(value): - return _str(value) if value is not None else None - -def _int_to_str(value): - return str(int(value)) if value is not None else None - -def _bool_to_str(value): - if value is None: - return None - - if isinstance(value, bool): - if value: - return 'true' - else: - return 'false' - - return str(value) - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') - -def _datetime_to_utc_string(value): - # Azure expects the date value passed in to be UTC. - # Azure will always return values as UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. 
- if value is None: - return None - - if value.tzinfo: - value = value.astimezone(tzutc()) - - return value.strftime('%a, %d %b %Y %H:%M:%S GMT') - -def _encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def _decode_base64_to_bytes(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def _decode_base64_to_text(data): - decoded_bytes = _decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def _sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = _decode_base64_to_bytes(key) - else: - if isinstance(key, _unicode_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, _unicode_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = _encode_base64(digest) - return encoded_digest - -def _get_content_md5(data): - md5 = hashlib.md5() - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data')) - else: - raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data')) - - return base64.b64encode(md5.digest()).decode('utf-8') - -def _lower(text): - return text.lower() diff --git a/azure/multiapi/storage/v2016_05_31/_connection.py b/azure/multiapi/storage/v2016_05_31/_connection.py deleted file mode 100644 index 53a5367..0000000 --- a/azure/multiapi/storage/v2016_05_31/_connection.py +++ /dev/null @@ -1,146 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
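The _sign_string helper removed above is the core of SharedKey authorization: base64-decode the account key, compute an HMAC-SHA256 over the UTF-8 string-to-sign, and base64-encode the digest. A self-contained sketch of that computation; the string-to-sign here is abbreviated, whereas real requests canonicalize the verb, standard headers, x-ms-* headers, and resource path as in the deleted _auth.py:

    import base64
    import hashlib
    import hmac

    def sign(account_key_b64, string_to_sign):
        # Same steps as the deleted _sign_string(key, string_to_sign, key_is_base64=True).
        key = base64.b64decode(account_key_b64)
        digest = hmac.new(key, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
        return base64.b64encode(digest).decode("utf-8")

    # Placeholder key and an abbreviated string-to-sign, for illustration only.
    signature = sign("SGVsbG8gd29ybGQ=", "GET\n\n\n")
    authorization = "SharedKey myaccount:" + signature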
-#-------------------------------------------------------------------------- -import os -import sys -if sys.version_info >= (3,): - from urllib.parse import urlparse -else: - from urlparse import urlparse - -from ._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, - DEV_ACCOUNT_NAME, - DEV_ACCOUNT_KEY, - DEV_BLOB_HOST, - DEV_QUEUE_HOST, - DEV_TABLE_HOST -) -from ._error import ( - _ERROR_STORAGE_MISSING_INFO, -) - -_EMULATOR_ENDPOINTS = { - 'blob': DEV_BLOB_HOST, - 'queue': DEV_QUEUE_HOST, - 'table': DEV_TABLE_HOST, - 'file': '', -} - -_CONNECTION_ENDPONTS = { - 'blob': 'BlobEndpoint', - 'queue': 'QueueEndpoint', - 'table': 'TableEndpoint', - 'file': 'FileEndpoint', -} - -class _ServiceParameters(object): - def __init__(self, service, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None): - - self.account_name = account_name - self.account_key = account_key - self.sas_token = sas_token - self.protocol = protocol or DEFAULT_PROTOCOL - - if is_emulated: - self.account_name = DEV_ACCOUNT_NAME - self.protocol = 'http' - - # Only set the account key if a sas_token is not present to allow sas to be used with the emulator - self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None - - self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], self.account_name) - self.secondary_endpoint = '{}/{}-secondary'.format(_EMULATOR_ENDPOINTS[service], self.account_name) - else: - # Strip whitespace from the key - if self.account_key: - self.account_key = self.account_key.strip() - - endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE - - # Setup the primary endpoint - if custom_domain: - parsed_url = urlparse(custom_domain) - - # Trim any trailing slashes from the path - path = parsed_url.path.rstrip('/') - - self.primary_endpoint = parsed_url.netloc + path - self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme - else: - if not self.account_name: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix) - - # Setup the secondary endpoint - if self.account_name: - self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix) - else: - self.secondary_endpoint = None - - @staticmethod - def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, is_emulated=None, - protocol=None, endpoint_suffix=None, custom_domain=None, request_session=None, - connection_string=None, socket_timeout=None): - if connection_string: - params = _ServiceParameters._from_connection_string(connection_string, service) - elif is_emulated: - params = _ServiceParameters(service, is_emulated=True) - elif account_name: - params = _ServiceParameters(service, - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=custom_domain) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - params.request_session = request_session - params.socket_timeout = socket_timeout - return params - - @staticmethod - def _from_connection_string(connection_string, service): - # Split into key=value pairs removing empties, then split the pairs into a dict - config = dict(s.split('=', 1) for s in connection_string.split(';') if s) - - # Authentication - account_name = config.get('AccountName') - account_key = config.get('AccountKey') 
- sas_token = config.get('SharedAccessSignature') - - # Emulator - is_emulated = config.get('UseDevelopmentStorage') - - # Basic URL Configuration - protocol = config.get('DefaultEndpointsProtocol') - endpoint_suffix = config.get('EndpointSuffix') - - # Custom URLs - endpoint = config.get(_CONNECTION_ENDPONTS[service]) - - return _ServiceParameters(service, - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=endpoint) diff --git a/azure/multiapi/storage/v2016_05_31/_constants.py b/azure/multiapi/storage/v2016_05_31/_constants.py deleted file mode 100644 index 8780801..0000000 --- a/azure/multiapi/storage/v2016_05_31/_constants.py +++ /dev/null @@ -1,43 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -import platform - -__author__ = 'Microsoft Corp. ' -__version__ = '0.34.3' - -# x-ms-version for storage service. -X_MS_VERSION = '2016-05-31' - -# UserAgent string sample: 'Azure-Storage/0.32.0 (Python CPython 3.4.2; Windows 8)' -USER_AGENT_STRING = 'Azure-Storage/{} (Python {} {}; {} {})'.format(__version__, platform.python_implementation(), platform.python_version(), platform.system(), platform.release()) - -# Live ServiceClient URLs -SERVICE_HOST_BASE = 'core.windows.net' -DEFAULT_PROTOCOL = 'https' - -# Development ServiceClient URLs -DEV_BLOB_HOST = '127.0.0.1:10000' -DEV_QUEUE_HOST = '127.0.0.1:10001' -DEV_TABLE_HOST = '127.0.0.1:10002' - -# Default credentials for Development Storage Service -DEV_ACCOUNT_NAME = 'devstoreaccount1' -DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==' - -# Socket timeout in seconds -DEFAULT_SOCKET_TIMEOUT = 20 - -#Encryption constants -_ENCRYPTION_PROTOCOL_V1 = '1.0' \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/_deserialization.py b/azure/multiapi/storage/v2016_05_31/_deserialization.py deleted file mode 100644 index 36e24e2..0000000 --- a/azure/multiapi/storage/v2016_05_31/_deserialization.py +++ /dev/null @@ -1,332 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
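The connection-string handling removed above splits the string on ';' and each pair on the first '=' only, so values that themselves contain '=' (such as base64 account keys) survive intact. A standalone sketch of the same parsing, with made-up example values:

    def parse_connection_string(conn_str):
        # Split "Key=Value;Key=Value" pairs, skipping empties, and split each pair
        # only once, exactly as the deleted _from_connection_string does.
        return dict(part.split("=", 1) for part in conn_str.split(";") if part)

    settings = parse_connection_string(
        "DefaultEndpointsProtocol=https;AccountName=myaccount;"
        "AccountKey=SGVsbG8gd29ybGQ=;EndpointSuffix=core.windows.net"
    )
    print(settings["AccountName"], settings["EndpointSuffix"])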
-#-------------------------------------------------------------------------- -from dateutil import parser -from ._common_conversion import _to_str -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from .models import ( - ServiceProperties, - Logging, - Metrics, - CorsRule, - AccessPolicy, - _HeaderDict, - _dict, - GeoReplication, - ServiceStats, -) - -def _int_to_str(value): - return value if value is None else int(value) - -def _bool(value): - return value.lower() == 'true' - -def _get_download_size(start_range, end_range, resource_size): - if start_range is not None: - end_range = end_range if end_range else (resource_size if resource_size else None) - if end_range is not None: - return end_range - start_range - else: - return None - else: - return resource_size - -GET_PROPERTIES_ATTRIBUTE_MAP = { - 'last-modified': (None, 'last_modified', parser.parse), - 'etag': (None, 'etag', _to_str), - 'x-ms-blob-type': (None, 'blob_type', _to_str), - 'content-length': (None, 'content_length', _int_to_str), - 'content-range': (None, 'content_range', _to_str), - 'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _int_to_str), - 'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _int_to_str), - 'x-ms-share-quota': (None, 'quota', _int_to_str), - 'x-ms-server-encrypted': (None, 'server_encrypted', _bool), - 'content-type': ('content_settings', 'content_type', _to_str), - 'cache-control': ('content_settings', 'cache_control', _to_str), - 'content-encoding': ('content_settings', 'content_encoding', _to_str), - 'content-disposition': ('content_settings', 'content_disposition', _to_str), - 'content-language': ('content_settings', 'content_language', _to_str), - 'content-md5': ('content_settings', 'content_md5', _to_str), - 'x-ms-lease-status': ('lease', 'status', _to_str), - 'x-ms-lease-state': ('lease', 'state', _to_str), - 'x-ms-lease-duration': ('lease', 'duration', _to_str), - 'x-ms-copy-id': ('copy', 'id', _to_str), - 'x-ms-copy-source': ('copy', 'source', _to_str), - 'x-ms-copy-status': ('copy', 'status', _to_str), - 'x-ms-copy-progress': ('copy', 'progress', _to_str), - 'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse), - 'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str), - 'x-ms-copy-status-description': ('copy', 'status_description', _to_str), -} - -def _parse_metadata(response): - ''' - Extracts out resource metadata information. - ''' - - if response is None or response.headers is None: - return None - - metadata = _dict() - for key, value in response.headers.items(): - if key.startswith('x-ms-meta-'): - metadata[key[10:]] = _to_str(value) - - return metadata - -def _parse_properties(response, result_class): - ''' - Extracts out resource properties and metadata information. - Ignores the standard http headers. 
- ''' - - if response is None or response.headers is None: - return None - - props = result_class() - for key, value in response.headers.items(): - info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key) - if info: - if info[0] is None: - setattr(props, info[1], info[2](value)) - else: - attr = getattr(props, info[0]) - setattr(attr, info[1], info[2](value)) - - return props - -def _parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - -def _convert_xml_to_signed_identifiers(response): - ''' - - - - unique-value - - start-time - expiry-time - abbreviated-permission-list - - - - ''' - if response is None or response.body is None: - return None - - list_element = ETree.fromstring(response.body) - signed_identifiers = _dict() - - for signed_identifier_element in list_element.findall('SignedIdentifier'): - # Id element - id = signed_identifier_element.find('Id').text - - # Access policy element - access_policy = AccessPolicy() - access_policy_element = signed_identifier_element.find('AccessPolicy') - if access_policy_element is not None: - start_element = access_policy_element.find('Start') - if start_element is not None: - access_policy.start = parser.parse(start_element.text) - - expiry_element = access_policy_element.find('Expiry') - if expiry_element is not None: - access_policy.expiry = parser.parse(expiry_element.text) - - access_policy.permission = access_policy_element.findtext('Permission') - - signed_identifiers[id] = access_policy - - return signed_identifiers - -def _convert_xml_to_service_stats(response): - ''' - - - - live|bootstrap|unavailable - sync-time| - - - ''' - if response is None or response.body is None: - return None - - service_stats_element = ETree.fromstring(response.body) - - geo_replication_element = service_stats_element.find('GeoReplication') - - geo_replication = GeoReplication() - geo_replication.status = geo_replication_element.find('Status').text - geo_replication.last_sync_time = parser.parse(geo_replication_element.find('LastSyncTime').text) - - service_stats = ServiceStats() - service_stats.geo_replication = geo_replication - return service_stats - -def _convert_xml_to_service_properties(response): - ''' - - - - version-number - true|false - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - - comma-separated-list-of-allowed-origins - comma-separated-list-of-HTTP-verb - max-caching-age-in-seconds - comma-seperated-list-of-response-headers - comma-seperated-list-of-request-headers - - - - ''' - if response is None or response.body is None: - return None - - service_properties_element = ETree.fromstring(response.body) - service_properties = ServiceProperties() - - # Logging - logging = service_properties_element.find('Logging') - if logging is not None: - service_properties.logging = Logging() - service_properties.logging.version = logging.find('Version').text - service_properties.logging.delete = _bool(logging.find('Delete').text) - service_properties.logging.read = _bool(logging.find('Read').text) - service_properties.logging.write = 
_bool(logging.find('Write').text) - - _convert_xml_to_retention_policy(logging.find('RetentionPolicy'), - service_properties.logging.retention_policy) - # HourMetrics - hour_metrics_element = service_properties_element.find('HourMetrics') - if hour_metrics_element is not None: - service_properties.hour_metrics = Metrics() - _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics) - - # MinuteMetrics - minute_metrics_element = service_properties_element.find('MinuteMetrics') - if minute_metrics_element is not None: - service_properties.minute_metrics = Metrics() - _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics) - - # CORS - cors = service_properties_element.find('Cors') - if cors is not None: - service_properties.cors = list() - for rule in cors.findall('CorsRule'): - allowed_origins = rule.find('AllowedOrigins').text.split(',') - - allowed_methods = rule.find('AllowedMethods').text.split(',') - - max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text) - - cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds) - - exposed_headers = rule.find('ExposedHeaders').text - if exposed_headers is not None: - cors_rule.exposed_headers = exposed_headers.split(',') - - allowed_headers = rule.find('AllowedHeaders').text - if allowed_headers is not None: - cors_rule.allowed_headers = allowed_headers.split(',') - - service_properties.cors.append(cors_rule) - - # Target version - target_version = service_properties_element.find('DefaultServiceVersion') - if target_version is not None: - service_properties.target_version = target_version.text - - return service_properties - - -def _convert_xml_to_metrics(xml, metrics): - ''' - version-number - true|false - true|false - - true|false - number-of-days - - ''' - # Version - metrics.version = xml.find('Version').text - - # Enabled - metrics.enabled = _bool(xml.find('Enabled').text) - - # IncludeAPIs - include_apis_element = xml.find('IncludeAPIs') - if include_apis_element is not None: - metrics.include_apis = _bool(include_apis_element.text) - - # RetentionPolicy - _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy) - - -def _convert_xml_to_retention_policy(xml, retention_policy): - ''' - true|false - number-of-days - ''' - # Enabled - retention_policy.enabled = _bool(xml.find('Enabled').text) - - # Days - days_element = xml.find('Days') - if days_element is not None: - retention_policy.days = int(days_element.text) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/_encryption.py b/azure/multiapi/storage/v2016_05_31/_encryption.py deleted file mode 100644 index 828c6aa..0000000 --- a/azure/multiapi/storage/v2016_05_31/_encryption.py +++ /dev/null @@ -1,232 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#-------------------------------------------------------------------------- -from ._common_conversion import( - _encode_base64, - _decode_base64_to_bytes, -) -from ._constants import( - _ENCRYPTION_PROTOCOL_V1, - __version__, -) -from ._error import( - _ERROR_UNSUPPORTED_ENCRYPTION_VERSION, - _validate_not_none, - _validate_encryption_protocol_version, - _validate_key_encryption_key_unwrap, - _validate_kek_id, -) -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.ciphers import Cipher -from collections import OrderedDict - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The conetent encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. 
- # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary':'Python ' + __version__} - - return encryption_data_dict - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - except KeyError: - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - (i.e. TableService) instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high service object (i.e. TableService) instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. 
- :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol) - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_unwrap(key_encryption_key) - _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid()) - - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/_error.py b/azure/multiapi/storage/v2016_05_31/_error.py deleted file mode 100644 index 5ada054..0000000 --- a/azure/multiapi/storage/v2016_05_31/_error.py +++ /dev/null @@ -1,170 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from sys import version_info -from io import IOBase -if version_info < (3,): - def _str(value): - if isinstance(value, unicode): - return value.encode('utf-8') - - return str(value) -else: - _str = str - -def _to_str(value): - return _str(value) if value is not None else None - -from azure.common import ( - AzureHttpError, - AzureConflictHttpError, - AzureMissingResourceHttpError, - AzureException, -) -from ._constants import ( - _ENCRYPTION_PROTOCOL_V1, -) -_ERROR_CONFLICT = 'Conflict ({0})' -_ERROR_NOT_FOUND = 'Not found ({0})' -_ERROR_UNKNOWN = 'Unknown error ({0})' -_ERROR_STORAGE_MISSING_INFO = \ - 'You need to provide an account name and either an account_key or sas_token when creating a storage service.' -_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \ - 'The emulator does not support the file service.' -_ERROR_ACCESS_POLICY = \ - 'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \ - 'instance' -_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.' -_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.' -_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.' -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' -_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.' -_ERROR_VALUE_NONE = '{0} should not be None.' 
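For reference, the _generate_AES_CBC_cipher helper deleted above builds a Cipher from the cryptography package using a content-encryption key and IV, and the envelope metadata stores that IV alongside the wrapped key. A small sketch of a round trip with the same primitives; the key and plaintext are throwaway values, and PKCS7 padding is an assumption here since AES-CBC requires full blocks:

    import os
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher
    from cryptography.hazmat.primitives.ciphers.algorithms import AES
    from cryptography.hazmat.primitives.ciphers.modes import CBC
    from cryptography.hazmat.primitives.padding import PKCS7

    cek = os.urandom(32)  # 256-bit content-encryption key
    iv = os.urandom(16)   # block-size IV, as stored in ContentEncryptionIV

    cipher = Cipher(AES(cek), CBC(iv), backend=default_backend())

    padder = PKCS7(128).padder()
    padded = padder.update(b"hello blob") + padder.finalize()
    ciphertext_ctx = cipher.encryptor()
    ciphertext = ciphertext_ctx.update(padded) + ciphertext_ctx.finalize()

    plaintext_ctx = cipher.decryptor()
    unpadder = PKCS7(128).unpadder()
    decrypted = plaintext_ctx.update(ciphertext) + plaintext_ctx.finalize()
    plaintext = unpadder.update(decrypted) + unpadder.finalize()
    assert plaintext == b"hello blob"

One Cipher object can hand out both the encryptor() and decryptor() contexts, since each call returns a fresh one-shot context.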
-_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.' -_ERROR_VALUE_NEGATIVE = '{0} should not be negative.' -_ERROR_NO_SINGLE_THREAD_CHUNKING = \ - 'To use {0} chunk downloader more than 1 thread must be ' + \ - 'used since get_{0}_to_bytes should be called for single threaded ' + \ - '{0} downloads.' -_ERROR_START_END_NEEDED_FOR_MD5 = \ - 'Both end_range and start_range need to be specified ' + \ - 'for getting content MD5.' -_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \ - 'Getting content MD5 for a range greater than 4MB ' + \ - 'is not supported.' -_ERROR_MD5_MISMATCH = \ - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.' -_ERROR_TOO_MANY_ACCESS_POLICIES = \ - 'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' -_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \ - 'Encryption version is not supported.' -_ERROR_DECRYPTION_FAILURE = \ - 'Decryption failed' -_ERROR_ENCRYPTION_REQUIRED = \ - 'Encryption required but no key was provided.' -_ERROR_DECRYPTION_REQUIRED = \ - 'Decryption required but neither key nor resolver was provided.' + \ - ' If you do not want to decypt, please do not set the require encryption flag.' -_ERROR_INVALID_KID = \ - 'Provided or resolved key-encryption-key does not match the id of key used to encrypt.' -_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \ - 'Specified encryption algorithm is not supported.' -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \ - ' for this method.' -_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.' -_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.' - -def _dont_fail_on_exist(error): - ''' don't throw exception if the resource exists. - This is called by create_* APIs with fail_on_exist=False''' - if isinstance(error, AzureConflictHttpError): - return False - else: - raise error - - -def _dont_fail_not_exist(error): - ''' don't throw exception if the resource doesn't exist. 
- This is called by create_* APIs with fail_on_exist=False''' - if isinstance(error, AzureMissingResourceHttpError): - return False - else: - raise error - - -def _http_error_handler(http_error): - ''' Simple error handler for azure.''' - message = str(http_error) - if http_error.respbody is not None: - message += '\n' + http_error.respbody.decode('utf-8-sig') - raise AzureHttpError(message, http_error.status) - - -def _validate_type_bytes(param_name, param): - if not isinstance(param, bytes): - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) - -def _validate_type_bytes_or_stream(param_name, param): - if not (isinstance(param, bytes) or hasattr(param, 'read')): - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name)) - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError(_ERROR_VALUE_NONE.format(param_name)) - -def _validate_content_match(server_md5, computed_md5): - if server_md5 != computed_md5: - raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5)) - -def _validate_access_policies(identifiers): - if identifiers and len(identifiers) > 5: - raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES) -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - -def _validate_key_encryption_key_unwrap(kek): - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - -def _validate_encryption_required(require_encryption, kek): - if require_encryption and (kek is None): - raise ValueError(_ERROR_ENCRYPTION_REQUIRED) - -def _validate_decryption_required(require_encryption, kek, resolver): - if(require_encryption and (kek is None) and - (resolver is None)): - raise ValueError(_ERROR_DECRYPTION_REQUIRED) - -def _validate_encryption_protocol_version(encryption_protocol): - if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - -def _validate_kek_id(kid, resolved_id): - if not (kid == resolved_id): - raise ValueError(_ERROR_INVALID_KID) - -def _validate_encryption_unsupported(require_encryption, key_encryption_key): - if require_encryption or (key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/_http/__init__.py b/azure/multiapi/storage/v2016_05_31/_http/__init__.py deleted file mode 100644 index 1b4c442..0000000 --- a/azure/multiapi/storage/v2016_05_31/_http/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -class HTTPError(Exception): - - ''' - Represents an HTTP Exception when response status code >= 300. - - :ivar int status: - the status code of the response - :ivar str message: - the message - :ivar list headers: - the returned headers, as a list of (name, value) pairs - :ivar bytes body: - the body of the response - ''' - - def __init__(self, status, message, respheader, respbody): - self.status = status - self.respheader = respheader - self.respbody = respbody - Exception.__init__(self, message) - - -class HTTPResponse(object): - - ''' - Represents a response from an HTTP request. - - :ivar int status: - the status code of the response - :ivar str message: - the message - :ivar dict headers: - the returned headers - :ivar bytes body: - the body of the response - ''' - - def __init__(self, status, message, headers, body): - self.status = status - self.message = message - self.headers = headers - self.body = body - - -class HTTPRequest(object): - - ''' - Represents an HTTP Request. - - :ivar str host: - the host name to connect to - :ivar str method: - the method to use to connect (string such as GET, POST, PUT, etc.) - :ivar str path: - the uri fragment - :ivar dict query: - query parameters - :ivar dict headers: - header values - :ivar bytes body: - the body of the request. - ''' - - def __init__(self): - self.host = '' - self.method = '' - self.path = '' - self.query = {} # list of (name, value) - self.headers = {} # list of (header name, header value) - self.body = '' diff --git a/azure/multiapi/storage/v2016_05_31/_http/batchclient.py b/azure/multiapi/storage/v2016_05_31/_http/batchclient.py deleted file mode 100644 index 6086b21..0000000 --- a/azure/multiapi/storage/v2016_05_31/_http/batchclient.py +++ /dev/null @@ -1,351 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#-------------------------------------------------------------------------- -import sys -import uuid - -from azure.common import ( - AzureHttpError, -) -from ..models import ( - AzureBatchOperationError, - AzureBatchValidationError, -) -from .._common_error import ( - _ERROR_CANNOT_FIND_PARTITION_KEY, - _ERROR_CANNOT_FIND_ROW_KEY, - _ERROR_INCORRECT_TABLE_IN_BATCH, - _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH, - _ERROR_DUPLICATE_ROW_KEY_IN_BATCH, - _ERROR_BATCH_COMMIT_FAIL, -) -from .._common_serialization import ( - ETree, - url_unquote, - _get_etree_text, - _etree_entity_feed_namespaces, - _update_request_uri_query, -) -from ..table._serialization import ( - _update_storage_table_header, -) -from . import HTTPError, HTTPRequest, HTTPResponse -from .httpclient import _HTTPClient - -_DATASERVICES_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices' - -if sys.version_info < (3,): - def _new_boundary(): - return str(uuid.uuid1()) -else: - def _new_boundary(): - return str(uuid.uuid1()).encode('utf-8') - - -class _BatchClient(_HTTPClient): - - ''' - This is the class that is used for batch operation for storage table - service. It only supports one changeset. - ''' - - def __init__(self, service_instance, authentication, - protocol='http', request_session=None, timeout=65, user_agent=''): - _HTTPClient.__init__(self, service_instance, protocol=protocol, request_session=request_session, timeout=timeout, user_agent=user_agent) - self.authentication = authentication - self.is_batch = False - self.batch_requests = [] - self.batch_table = '' - self.batch_partition_key = '' - self.batch_row_keys = [] - - def get_request_table(self, request): - ''' - Extracts table name from request.uri. The request.uri has either - "/mytable(...)" or "/mytable" format. - - request: - the request to insert, update or delete entity - ''' - if '(' in request.path: - pos = request.path.find('(') - return request.path[1:pos] - else: - return request.path[1:] - - def get_request_partition_key(self, request): - ''' - Extracts PartitionKey from request.body if it is a POST request or from - request.path if it is not a POST request. Only insert operation request - is a POST request and the PartitionKey is in the request body. - - request: - the request to insert, update or delete entity - ''' - if request.method == 'POST': - doc = ETree.fromstring(request.body) - part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces) - if part_key is None: - raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY) - return _get_etree_text(part_key) - else: - uri = url_unquote(request.path) - pos1 = uri.find('PartitionKey=\'') - pos2 = uri.find('\',', pos1) - if pos1 == -1 or pos2 == -1: - raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY) - return uri[pos1 + len('PartitionKey=\''):pos2] - - def get_request_row_key(self, request): - ''' - Extracts RowKey from request.body if it is a POST request or from - request.path if it is not a POST request. Only insert operation request - is a POST request and the Rowkey is in the request body. 
- - request: - the request to insert, update or delete entity - ''' - if request.method == 'POST': - doc = ETree.fromstring(request.body) - row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces) - if row_key is None: - raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY) - return _get_etree_text(row_key) - else: - uri = url_unquote(request.path) - pos1 = uri.find('RowKey=\'') - pos2 = uri.find('\')', pos1) - if pos1 == -1 or pos2 == -1: - raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY) - row_key = uri[pos1 + len('RowKey=\''):pos2] - return row_key - - def validate_request_table(self, request): - ''' - Validates that all requests have the same table name. Set the table - name if it is the first request for the batch operation. - - request: - the request to insert, update or delete entity - ''' - if self.batch_table: - if self.get_request_table(request) != self.batch_table: - raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH) - else: - self.batch_table = self.get_request_table(request) - - def validate_request_partition_key(self, request): - ''' - Validates that all requests have the same PartitiionKey. Set the - PartitionKey if it is the first request for the batch operation. - - request: - the request to insert, update or delete entity - ''' - if self.batch_partition_key: - if self.get_request_partition_key(request) != \ - self.batch_partition_key: - raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH) - else: - self.batch_partition_key = self.get_request_partition_key(request) - - def validate_request_row_key(self, request): - ''' - Validates that all requests have the different RowKey and adds RowKey - to existing RowKey list. - - request: - the request to insert, update or delete entity - ''' - if self.batch_row_keys: - if self.get_request_row_key(request) in self.batch_row_keys: - raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH) - else: - self.batch_row_keys.append(self.get_request_row_key(request)) - - def begin_batch(self): - ''' - Starts the batch operation. Intializes the batch variables - - is_batch: - batch operation flag. - batch_table: - the table name of the batch operation - batch_partition_key: - the PartitionKey of the batch requests. - batch_row_keys: - the RowKey list of adding requests. - batch_requests: - the list of the requests. - ''' - self.is_batch = True - self.batch_table = '' - self.batch_partition_key = '' - self.batch_row_keys = [] - self.batch_requests = [] - - def insert_request_to_batch(self, request): - ''' - Adds request to batch operation. - - request: - the request to insert, update or delete entity - ''' - self.validate_request_table(request) - self.validate_request_partition_key(request) - self.validate_request_row_key(request) - self.batch_requests.append(request) - - def commit_batch(self): - ''' Resets batch flag and commits the batch requests. ''' - if self.is_batch: - self.is_batch = False - self.commit_batch_requests() - - def commit_batch_requests(self): - ''' Commits the batch requests. ''' - - batch_boundary = b'batch_' + _new_boundary() - changeset_boundary = b'changeset_' + _new_boundary() - - # Commits batch only the requests list is not empty. 
- if self.batch_requests: - request = HTTPRequest() - request.method = 'POST' - request.host = self.batch_requests[0].host - request.path = '/$batch' - request.headers = [ - ('Content-Type', 'multipart/mixed; boundary=' + \ - batch_boundary.decode('utf-8')), - ('Accept', 'application/atom+xml,application/xml'), - ('Accept-Charset', 'UTF-8')] - - request.body = b'--' + batch_boundary + b'\n' - request.body += b'Content-Type: multipart/mixed; boundary=' - request.body += changeset_boundary + b'\n\n' - - content_id = 1 - - # Adds each request body to the POST data. - for batch_request in self.batch_requests: - request.body += b'--' + changeset_boundary + b'\n' - request.body += b'Content-Type: application/http\n' - request.body += b'Content-Transfer-Encoding: binary\n\n' - request.body += batch_request.method.encode('utf-8') - request.body += b' http://' - request.body += batch_request.host.encode('utf-8') - request.body += batch_request.path.encode('utf-8') - request.body += b' HTTP/1.1\n' - request.body += b'Content-ID: ' - request.body += str(content_id).encode('utf-8') + b'\n' - content_id += 1 - - # Add different headers for different type requests. - if not batch_request.method == 'DELETE': - request.body += \ - b'Content-Type: application/atom+xml;type=entry\n' - for name, value in batch_request.headers: - if name == 'If-Match': - request.body += name.encode('utf-8') + b': ' - request.body += value.encode('utf-8') + b'\n' - break - request.body += b'Content-Length: ' - request.body += str(len(batch_request.body)).encode('utf-8') - request.body += b'\n\n' - request.body += batch_request.body + b'\n' - else: - for name, value in batch_request.headers: - # If-Match should be already included in - # batch_request.headers, but in case it is missing, - # just add it. - if name == 'If-Match': - request.body += name.encode('utf-8') + b': ' - request.body += value.encode('utf-8') + b'\n\n' - break - else: - request.body += b'If-Match: *\n\n' - - request.body += b'--' + changeset_boundary + b'--' + b'\n' - request.body += b'--' + batch_boundary + b'--' - - request.path, request.query = _update_request_uri_query(request) - request.headers = _update_storage_table_header(request) - self.authentication.sign_request(request) - - # Submit the whole request as batch request. - response = self.perform_request(request) - if response.status >= 300: - # This exception will be caught by the general error handler - # and raised as an azure http exception - raise HTTPError(response.status, - _ERROR_BATCH_COMMIT_FAIL, - self.respheader, - response.body) - - # http://www.odata.org/documentation/odata-version-2-0/batch-processing/ - # The body of a ChangeSet response is either a response for all the - # successfully processed change request within the ChangeSet, - # formatted exactly as it would have appeared outside of a batch, - # or a single response indicating a failure of the entire ChangeSet. - responses = self._parse_batch_response(response.body) - if responses and responses[0].status >= 300: - self._report_batch_error(responses[0]) - - def cancel_batch(self): - ''' Resets the batch flag. 
''' - self.is_batch = False - - def _parse_batch_response(self, body): - parts = body.split(b'--changesetresponse_') - - responses = [] - for part in parts: - httpLocation = part.find(b'HTTP/') - if httpLocation > 0: - response = self._parse_batch_response_part(part[httpLocation:]) - responses.append(response) - - return responses - - def _parse_batch_response_part(self, part): - lines = part.splitlines(); - - # First line is the HTTP status/reason - status, _, reason = lines[0].partition(b' ')[2].partition(b' ') - - # Followed by headers and body - headers = [] - body = b'' - isBody = False - for line in lines[1:]: - if line == b'' and not isBody: - isBody = True - elif isBody: - body += line - else: - headerName, _, headerVal = line.partition(b':') - headers.append((headerName.lower(), headerVal)) - - return HTTPResponse(int(status), reason.strip(), headers, body) - - def _report_batch_error(self, response): - doc = ETree.fromstring(response.body) - - code_element = doc.find('./m:code', _etree_entity_feed_namespaces) - code = _get_etree_text(code_element) if code_element is not None else '' - - message_element = doc.find('./m:message', _etree_entity_feed_namespaces) - message = _get_etree_text(message_element) if message_element is not None else '' - - raise AzureBatchOperationError(message, response.status, code) diff --git a/azure/multiapi/storage/v2016_05_31/_http/httpclient.py b/azure/multiapi/storage/v2016_05_31/_http/httpclient.py deleted file mode 100644 index 18e62fe..0000000 --- a/azure/multiapi/storage/v2016_05_31/_http/httpclient.py +++ /dev/null @@ -1,125 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -import base64 -import sys - -if sys.version_info < (3,): - from httplib import ( - HTTP_PORT, - HTTPS_PORT, - ) - from urllib2 import quote as url_quote -else: - from http.client import ( - HTTP_PORT, - HTTPS_PORT, - ) - from urllib.parse import quote as url_quote - -from . import HTTPError, HTTPResponse -from .._serialization import _get_data_bytes_or_stream_only - -class _HTTPClient(object): - ''' - Takes the request and sends it to cloud service and returns the response. - ''' - - def __init__(self, protocol=None, session=None, timeout=None): - ''' - :param str protocol: - http or https. - :param requests.Session request_session: - session object created with requests library (or compatible). - :param int timeout: - timeout for the http request, in seconds. - ''' - self.protocol = protocol - self.session = session - self.timeout = timeout - - # By default, requests adds an Accept:*/* and Accept-Encoding to the session, - # which causes issues with some Azure REST APIs. Removing these here gives us - # the flexibility to add it back on a case by case basis. 
- if 'Accept' in self.session.headers: - del self.session.headers['Accept'] - - if 'Accept-Encoding' in self.session.headers: - del self.session.headers['Accept-Encoding'] - - self.proxies = None - - def set_proxy(self, host, port, user, password): - ''' - Sets the proxy server host and port for the HTTP CONNECT Tunnelling. - - Note that we set the proxies directly on the request later on rather than - using the session object as requests has a bug where session proxy is ignored - in favor of environment proxy. So, auth will not work unless it is passed - directly when making the request as this overrides both. - - :param str host: - Address of the proxy. Ex: '192.168.0.100' - :param int port: - Port of the proxy. Ex: 6000 - :param str user: - User for proxy authorization. - :param str password: - Password for proxy authorization. - ''' - if user and password: - proxy_string = '{}:{}@{}:{}'.format(user, password, host, port) - else: - proxy_string = '{}:{}'.format(host, port) - - self.proxies = {} - self.proxies['http'] = 'http://{}'.format(proxy_string) - self.proxies['https'] = 'https://{}'.format(proxy_string) - - def perform_request(self, request): - ''' - Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. If - the response code indicates an error, raise an HTTPError. - - :param HTTPRequest request: - The request to serialize and send. - :return: An HTTPResponse containing the parsed HTTP response. - :rtype: :class:`~azure.storage._http.HTTPResponse` - ''' - # Verify the body is in bytes or either a file-like/stream object - if request.body: - request.body = _get_data_bytes_or_stream_only('request.body', request.body) - - # Construct the URI - uri = self.protocol.lower() + '://' + request.host + request.path - - # Send the request - response = self.session.request(request.method, - uri, - params=request.query, - headers=request.headers, - data=request.body or None, - timeout=self.timeout, - proxies=self.proxies) - - # Parse the response - status = int(response.status_code) - respheaders = {} - for key, name in response.headers.items(): - respheaders[key.lower()] = name - - wrap = HTTPResponse(status, response.reason, respheaders, response.content) - response.close() - - return wrap diff --git a/azure/multiapi/storage/v2016_05_31/_serialization.py b/azure/multiapi/storage/v2016_05_31/_serialization.py deleted file mode 100644 index ba3a450..0000000 --- a/azure/multiapi/storage/v2016_05_31/_serialization.py +++ /dev/null @@ -1,340 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
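Before leaving the HTTP client hunk above: set_proxy builds a per-request proxies mapping rather than configuring the session, for the reason given in its docstring. A minimal sketch of the same mapping (host, port and credentials are placeholders):

def build_proxies(host, port, user=None, password=None):
    # Mirrors set_proxy above: embed credentials only when both are supplied.
    if user and password:
        proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
    else:
        proxy_string = '{}:{}'.format(host, port)
    return {'http': 'http://' + proxy_string,
            'https': 'https://' + proxy_string}

# e.g. session.request('GET', uri, proxies=build_proxies('192.168.0.100', 6000))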
-#-------------------------------------------------------------------------- -import sys -import uuid -from datetime import date -from dateutil.tz import tzutc -from time import time -from wsgiref.handlers import format_date_time -from os import fstat -from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation) - -if sys.version_info >= (3,): - from urllib.parse import quote as url_quote -else: - from urllib2 import quote as url_quote - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from ._error import ( - _ERROR_VALUE_SHOULD_BE_BYTES, - _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, - _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM -) -from ._constants import ( - X_MS_VERSION, - USER_AGENT_STRING, -) -from .models import ( - _unicode_type, -) -from ._common_conversion import ( - _str, -) - -def _to_utc_datetime(value): - # Azure expects the date value passed in to be UTC. - # Azure will always return values as UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - if value.tzinfo: - value = value.astimezone(tzutc()) - return value.strftime('%Y-%m-%dT%H:%M:%SZ') - -def _update_request(request): - # Verify body - if request.body: - request.body = _get_data_bytes_or_stream_only('request.body', request.body) - length = _len_plus(request.body) - - # only scenario where this case is plausible is if the stream object is not seekable. - if length is None: - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM) - - # if it is PUT, POST, MERGE, DELETE, need to add content-length to header. - if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: - request.headers['Content-Length'] = str(length) - - # append addtional headers based on the service - request.headers['x-ms-version'] = X_MS_VERSION - request.headers['User-Agent'] = USER_AGENT_STRING - request.headers['x-ms-client-request-id'] = str(uuid.uuid1()) - - # If the host has a path component (ex local storage), move it - path = request.host.split('/', 1) - if len(path) == 2: - request.host = path[0] - request.path = '/{}{}'.format(path[1], request.path) - - # Encode and optionally add local storage prefix to path - request.path = url_quote(request.path, '/()$=\',~') - -def _add_metadata_headers(metadata, request): - if metadata: - if not request.headers: - request.headers = {} - for name, value in metadata.items(): - request.headers['x-ms-meta-' + name] = value - -def _add_date_header(request): - current_time = format_date_time(time()) - request.headers['x-ms-date'] = current_time - -def _get_data_bytes_only(param_name, param_value): - '''Validates the request body passed in and converts it to bytes - if our policy allows it.''' - if param_value is None: - return b'' - - if isinstance(param_value, bytes): - return param_value - - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) - - -def _get_data_bytes_or_stream_only(param_name, param_value): - '''Validates the request body passed in is a stream/file-like or bytes - object.''' - if param_value is None: - return b'' - - if isinstance(param_value, bytes) or hasattr(param_value, 'read'): - return param_value - - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name)) - - -def _get_request_body(request_body): - '''Converts an object into a request body. If it's None - we'll return an empty string, if it's one of our objects it'll - convert it to XML and return it. 
Otherwise we just use the object - directly''' - if request_body is None: - return b'' - - if isinstance(request_body, bytes) or isinstance(request_body, IOBase): - return request_body - - if isinstance(request_body, _unicode_type): - return request_body.encode('utf-8') - - request_body = str(request_body) - if isinstance(request_body, _unicode_type): - return request_body.encode('utf-8') - - return request_body - -def _convert_signed_identifiers_to_xml(signed_identifiers): - if signed_identifiers is None: - return '' - - sis = ETree.Element('SignedIdentifiers'); - for id, access_policy in signed_identifiers.items(): - # Root signed identifers element - si = ETree.SubElement(sis, 'SignedIdentifier') - - # Id element - ETree.SubElement(si, 'Id').text = id - - # Access policy element - policy = ETree.SubElement(si, 'AccessPolicy') - - if access_policy.start: - start = access_policy.start - if isinstance(access_policy.start, date): - start = _to_utc_datetime(start) - ETree.SubElement(policy, 'Start').text = start - - if access_policy.expiry: - expiry = access_policy.expiry - if isinstance(access_policy.expiry, date): - expiry = _to_utc_datetime(expiry) - ETree.SubElement(policy, 'Expiry').text = expiry - - if access_policy.permission: - ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission) - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - return output - -def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors, target_version=None): - ''' - - - - version-number - true|false - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - - comma-separated-list-of-allowed-origins - comma-separated-list-of-HTTP-verb - max-caching-age-in-seconds - comma-seperated-list-of-response-headers - comma-seperated-list-of-request-headers - - - - ''' - service_properties_element = ETree.Element('StorageServiceProperties'); - - # Logging - if logging: - logging_element = ETree.SubElement(service_properties_element, 'Logging') - ETree.SubElement(logging_element, 'Version').text = logging.version - ETree.SubElement(logging_element, 'Delete').text = str(logging.delete) - ETree.SubElement(logging_element, 'Read').text = str(logging.read) - ETree.SubElement(logging_element, 'Write').text = str(logging.write) - - retention_element = ETree.SubElement(logging_element, 'RetentionPolicy') - _convert_retention_policy_to_xml(logging.retention_policy, retention_element) - - # HourMetrics - if hour_metrics: - hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics') - _convert_metrics_to_xml(hour_metrics, hour_metrics_element) - - # MinuteMetrics - if minute_metrics: - minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics') - _convert_metrics_to_xml(minute_metrics, minute_metrics_element) - - # CORS - # Make sure to still serialize empty list - if cors is not None: - cors_element = ETree.SubElement(service_properties_element, 'Cors') - for rule in cors: - cors_rule = ETree.SubElement(cors_element, 'CorsRule') - ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins) - ETree.SubElement(cors_rule, 'AllowedMethods').text = 
",".join(rule.allowed_methods) - ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds) - ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers) - ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers) - - # Target version - if target_version: - ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - return output - -def _convert_metrics_to_xml(metrics, root): - ''' - version-number - true|false - true|false - - true|false - number-of-days - - ''' - # Version - ETree.SubElement(root, 'Version').text = metrics.version - - # Enabled - ETree.SubElement(root, 'Enabled').text = str(metrics.enabled) - - # IncludeAPIs - if metrics.enabled and metrics.include_apis is not None: - ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis) - - # RetentionPolicy - retention_element = ETree.SubElement(root, 'RetentionPolicy') - _convert_retention_policy_to_xml(metrics.retention_policy, retention_element) - -def _convert_retention_policy_to_xml(retention_policy, root): - ''' - true|false - number-of-days - ''' - # Enabled - ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled) - - # Days - if retention_policy.enabled and retention_policy.days: - ETree.SubElement(root, 'Days').text = str(retention_policy.days) - -def _len_plus(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - return fstat(fileno).st_size - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - currentPosition = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - currentPosition - data.seek(currentPosition, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length diff --git a/azure/multiapi/storage/v2016_05_31/blob/__init__.py b/azure/multiapi/storage/v2016_05_31/blob/__init__.py deleted file mode 100644 index 13ce4f8..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#-------------------------------------------------------------------------- -from .models import ( - Container, - ContainerProperties, - Blob, - BlobProperties, - BlobBlock, - BlobBlockList, - PageRange, - ContentSettings, - CopyProperties, - ContainerPermissions, - BlobPermissions, - _LeaseActions, - AppendBlockProperties, - PageBlobProperties, - ResourceProperties, - Include, - SequenceNumberAction, - BlockListType, - PublicAccess, - BlobPrefix, - DeleteSnapshot, -) - -from .blockblobservice import BlockBlobService -from .pageblobservice import PageBlobService -from .appendblobservice import AppendBlobService diff --git a/azure/multiapi/storage/v2016_05_31/blob/_deserialization.py b/azure/multiapi/storage/v2016_05_31/blob/_deserialization.py deleted file mode 100644 index edf458b..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/_deserialization.py +++ /dev/null @@ -1,417 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from dateutil import parser -from .._error import AzureException -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from .._common_conversion import ( - _decode_base64_to_text, - _to_str, -) -from .._deserialization import ( - _parse_properties, - _int_to_str, - _parse_metadata, - _convert_xml_to_signed_identifiers, - _bool, -) -from .models import ( - Container, - Blob, - BlobBlock, - BlobBlockList, - BlobBlockState, - BlobProperties, - PageRange, - ContainerProperties, - AppendBlockProperties, - PageBlobProperties, - ResourceProperties, - BlobPrefix, -) -from ._encryption import _decrypt_blob -from ..models import _list -from .._error import( - _validate_content_match, - _ERROR_DECRYPTION_FAILURE, -) -from .._common_conversion import _get_content_md5 - -def _parse_base_properties(response): - ''' - Extracts basic response headers. - ''' - resource_properties = ResourceProperties() - resource_properties.last_modified = parser.parse(response.headers.get('last-modified')) - resource_properties.etag = response.headers.get('etag') - - return resource_properties - -def _parse_page_properties(response): - ''' - Extracts page response headers. - ''' - put_page = PageBlobProperties() - put_page.last_modified = parser.parse(response.headers.get('last-modified')) - put_page.etag = response.headers.get('etag') - put_page.sequence_number = _int_to_str(response.headers.get('x-ms-blob-sequence-number')) - - return put_page - -def _parse_append_block(response): - ''' - Extracts append block response headers. 
- ''' - append_block = AppendBlockProperties() - append_block.last_modified = parser.parse(response.headers.get('last-modified')) - append_block.etag = response.headers.get('etag') - append_block.append_offset = _int_to_str(response.headers.get('x-ms-blob-append-offset')) - append_block.committed_block_count = _int_to_str(response.headers.get('x-ms-blob-committed-block-count')) - - return append_block - -def _parse_snapshot_blob(response, name): - ''' - Extracts snapshot return header. - ''' - snapshot = response.headers.get('x-ms-snapshot') - - return _parse_blob(response, name, snapshot) - -def _parse_lease(response): - ''' - Extracts lease time and ID return headers. - ''' - lease = {} - lease['time'] = response.headers.get('x-ms-lease-time') - if lease['time']: - lease['time'] = _int_to_str(lease['time']) - - lease['id'] = response.headers.get('x-ms-lease-id') - - return lease - -def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False, - key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, BlobProperties) - - # For range gets, only look at 'x-ms-blob-content-md5' for overall MD5 - content_settings = getattr(props, 'content_settings') - if 'content-range' in response.headers: - if 'x-ms-blob-content-md5' in response.headers: - setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5'])) - else: - delattr(content_settings, 'content_md5') - - if validate_content: - computed_md5 = _get_content_md5(response.body) - _validate_content_match(response.headers['content-md5'], computed_md5) - - if key_encryption_key is not None or key_resolver_function is not None: - try: - response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function, - response, start_offset, end_offset) - except: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - - return Blob(name, snapshot, response.body, props, metadata) - -def _parse_container(response, name): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, ContainerProperties) - return Container(name, props, metadata) - -def _convert_xml_to_signed_identifiers_and_access(response): - acl = _convert_xml_to_signed_identifiers(response) - acl.public_access = response.headers.get('x-ms-blob-public-access') - - return acl - -def _convert_xml_to_containers(response): - ''' - - - string-value - string-value - int-value - - - container-name - - date/time-value - etag - locked | unlocked - available | leased | expired | breaking | broken - infinite | fixed - blob | container - - - value - - - - marker-value - - ''' - if response is None or response.body is None: - return None - - containers = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - setattr(containers, 'next_marker', list_element.findtext('NextMarker')) - - containers_element = list_element.find('Containers') - - for container_element in containers_element.findall('Container'): - # Name element - container = Container() - container.name = container_element.findtext('Name') - - # Metadata - metadata_root_element = container_element.find('Metadata') - if metadata_root_element is not None: - container.metadata = dict() - for metadata_element in metadata_root_element: - container.metadata[metadata_element.tag] = metadata_element.text - - # Properties - properties_element = 
container_element.find('Properties') - container.properties.etag = properties_element.findtext('Etag') - container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified')) - container.properties.lease_status = properties_element.findtext('LeaseStatus') - container.properties.lease_state = properties_element.findtext('LeaseState') - container.properties.lease_duration = properties_element.findtext('LeaseDuration') - container.properties.public_access = properties_element.findtext('PublicAccess') - - # Add container to list - containers.append(container) - - return containers - -LIST_BLOBS_ATTRIBUTE_MAP = { - 'Last-Modified': (None, 'last_modified', parser.parse), - 'Etag': (None, 'etag', _to_str), - 'x-ms-blob-sequence-number': (None, 'sequence_number', _int_to_str), - 'BlobType': (None, 'blob_type', _to_str), - 'Content-Length': (None, 'content_length', _int_to_str), - 'ServerEncrypted': (None, 'server_encrypted', _bool), - 'Content-Type': ('content_settings', 'content_type', _to_str), - 'Content-Encoding': ('content_settings', 'content_encoding', _to_str), - 'Content-Disposition': ('content_settings', 'content_disposition', _to_str), - 'Content-Language': ('content_settings', 'content_language', _to_str), - 'Content-MD5': ('content_settings', 'content_md5', _to_str), - 'Cache-Control': ('content_settings', 'cache_control', _to_str), - 'LeaseStatus': ('lease', 'status', _to_str), - 'LeaseState': ('lease', 'state', _to_str), - 'LeaseDuration': ('lease', 'duration', _to_str), - 'CopyId': ('copy', 'id', _to_str), - 'CopySource': ('copy', 'source', _to_str), - 'CopyStatus': ('copy', 'status', _to_str), - 'CopyProgress': ('copy', 'progress', _to_str), - 'CopyCompletionTime': ('copy', 'completion_time', _to_str), - 'CopyStatusDescription': ('copy', 'status_description', _to_str), -} - -def _convert_xml_to_blob_list(response): - ''' - - - string-value - string-value - int-value - string-value - - - blob-name - date-time-value - - date-time-value - etag - size-in-bytes - blob-content-type - - - - - sequence-number - BlockBlob|PageBlob|AppendBlob - locked|unlocked - available | leased | expired | breaking | broken - infinite | fixed - id - pending | success | aborted | failed - source url - bytes copied/bytes total - datetime - error string - - - value - - - - blob-prefix - - - - - ''' - if response is None or response.body is None: - return None - - blob_list = _list() - list_element = ETree.fromstring(response.body) - - setattr(blob_list, 'next_marker', list_element.findtext('NextMarker')) - - blobs_element = list_element.find('Blobs') - blob_prefix_elements = blobs_element.findall('BlobPrefix') - if blob_prefix_elements is not None: - for blob_prefix_element in blob_prefix_elements: - prefix = BlobPrefix() - prefix.name = blob_prefix_element.findtext('Name') - blob_list.append(prefix) - - for blob_element in blobs_element.findall('Blob'): - blob = Blob() - blob.name = blob_element.findtext('Name') - blob.snapshot = blob_element.findtext('Snapshot') - - # Properties - properties_element = blob_element.find('Properties') - if properties_element is not None: - for property_element in properties_element: - info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag) - if info is None: - setattr(blob.properties, property_element.tag, _to_str(property_element.text)) - elif info[0] is None: - setattr(blob.properties, info[1], info[2](property_element.text)) - else: - attr = getattr(blob.properties, info[0]) - setattr(attr, info[1], info[2](property_element.text)) - - - # 
Metadata - metadata_root_element = blob_element.find('Metadata') - if metadata_root_element is not None: - blob.metadata = dict() - for metadata_element in metadata_root_element: - blob.metadata[metadata_element.tag] = metadata_element.text - - # Add blob to list - blob_list.append(blob) - - return blob_list - -def _convert_xml_to_block_list(response): - ''' - - - - - base64-encoded-block-id - size-in-bytes - - - - - base64-encoded-block-id - size-in-bytes - - - - - Converts xml response to block list class. - ''' - if response is None or response.body is None: - return None - - block_list = BlobBlockList() - - list_element = ETree.fromstring(response.body) - - committed_blocks_element = list_element.find('CommittedBlocks') - if committed_blocks_element is not None: - for block_element in committed_blocks_element.findall('Block'): - block_id = _decode_base64_to_text(block_element.findtext('Name', '')) - block_size = int(block_element.findtext('Size')) - block = BlobBlock(id=block_id, state=BlobBlockState.Committed) - block._set_size(block_size) - block_list.committed_blocks.append(block) - - uncommitted_blocks_element = list_element.find('UncommittedBlocks') - if uncommitted_blocks_element is not None: - for block_element in uncommitted_blocks_element.findall('Block'): - block_id = _decode_base64_to_text(block_element.findtext('Name', '')) - block_size = int(block_element.findtext('Size')) - block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted) - block._set_size(block_size) - block_list.uncommitted_blocks.append(block) - - return block_list - -def _convert_xml_to_page_ranges(response): - ''' - - - - Start Byte - End Byte - - - Start Byte - End Byte - - - Start Byte - End Byte - - - ''' - if response is None or response.body is None: - return None - - page_list = list() - - list_element = ETree.fromstring(response.body) - - for page_range_element in list_element: - if page_range_element.tag == 'PageRange': - is_cleared = False - elif page_range_element.tag == 'ClearRange': - is_cleared = True - else: - pass # ignore any unrecognized Page Range types - - page_list.append( - PageRange( - int(page_range_element.findtext('Start')), - int(page_range_element.findtext('End')), - is_cleared - ) - ) - - return page_list \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/blob/_download_chunking.py b/azure/multiapi/storage/v2016_05_31/blob/_download_chunking.py deleted file mode 100644 index 1c2d4f9..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/_download_chunking.py +++ /dev/null @@ -1,138 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
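To make the page-range parsing above concrete, the round trip on a toy payload looks like this (standard library only; the XML body is invented):

from xml.etree import ElementTree as ETree

body = b"""<?xml version="1.0" encoding="utf-8"?>
<PageList>
  <PageRange><Start>0</Start><End>511</End></PageRange>
  <ClearRange><Start>512</Start><End>1023</End></ClearRange>
</PageList>"""

ranges = [(el.tag == 'ClearRange',
           int(el.findtext('Start')),
           int(el.findtext('End')))
          for el in ETree.fromstring(body)]
# [(False, 0, 511), (True, 512, 1023)] -> (is_cleared, start, end)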
-#-------------------------------------------------------------------------- -import threading - -from time import sleep -from azure.common import ( - AzureHttpError, -) -from .._error import _ERROR_NO_SINGLE_THREAD_CHUNKING - -def _download_blob_chunks(blob_service, container_name, blob_name, snapshot, - download_size, block_size, progress, start_range, end_range, - stream, max_connections, progress_callback, validate_content, - lease_id, if_modified_since, if_unmodified_since, if_match, - if_none_match, timeout, operation_context): - if max_connections <= 1: - raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('blob')) - - downloader = _BlobChunkDownloader( - blob_service, - container_name, - blob_name, - snapshot, - download_size, - block_size, - progress, - start_range, - end_range, - stream, - progress_callback, - validate_content, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout, - operation_context, - ) - - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - result = list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets())) - -class _BlobChunkDownloader(object): - def __init__(self, blob_service, container_name, blob_name, snapshot, download_size, - chunk_size, progress, start_range, end_range, stream, - progress_callback, validate_content, lease_id, if_modified_since, - if_unmodified_since, if_match, if_none_match, timeout, operation_context): - self.blob_service = blob_service - self.container_name = container_name - self.blob_name = blob_name - self.snapshot = snapshot - self.chunk_size = chunk_size - - self.download_size = download_size - self.start_index = start_range - self.blob_end = end_range - - self.stream = stream - self.stream_start = stream.tell() - self.stream_lock = threading.Lock() - self.progress_callback = progress_callback - self.progress_total = progress - self.progress_lock = threading.Lock() - self.timeout = timeout - self.operation_context = operation_context - - self.validate_content = validate_content - self.lease_id = lease_id - self.if_modified_since=if_modified_since - self.if_unmodified_since=if_unmodified_since - self.if_match=if_match - self.if_none_match=if_none_match - - def get_chunk_offsets(self): - index = self.start_index - while index < self.blob_end: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - if chunk_start + self.chunk_size > self.blob_end: - chunk_end = self.blob_end - else: - chunk_end = chunk_start + self.chunk_size - - chunk_data = self._download_chunk(chunk_start, chunk_end).content - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def _update_progress(self, length): - if self.progress_callback is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.download_size) - - def _write_to_stream(self, chunk_data, chunk_start): - with self.stream_lock: - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - response = self.blob_service._get_blob( - self.container_name, - self.blob_name, - snapshot=self.snapshot, - start_range=chunk_start, - end_range=chunk_end - 1, - validate_content=self.validate_content, - lease_id=self.lease_id, - if_modified_since=self.if_modified_since, - if_unmodified_since=self.if_unmodified_since, 
- if_match=self.if_match, - if_none_match=self.if_none_match, - timeout=self.timeout, - _context=self.operation_context - ) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - self.if_match = response.properties.etag - return response \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/blob/_encryption.py b/azure/multiapi/storage/v2016_05_31/blob/_encryption.py deleted file mode 100644 index 4f30288..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/_encryption.py +++ /dev/null @@ -1,189 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -from os import urandom -from json import( - dumps, - loads, -) -from .._error import( - _validate_not_none, - _validate_key_encryption_key_wrap, - _ERROR_DATA_NOT_ENCRYPTED, -) -from .._encryption import ( - _generate_encryption_data_dict, - _generate_AES_CBC_cipher, - _dict_to_encryption_data, - _validate_and_unwrap_cek, - _EncryptionAlgorithm, -) -from cryptography.hazmat.primitives.padding import PKCS7 - -def _encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the _upload_blob_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
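For readers less familiar with the cryptography primitives used here, the single-shot path condenses to roughly the following (a standalone sketch; the key, IV and plaintext are placeholders):

from os import urandom
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7

content_encryption_key = urandom(32)    # AES-256 key
initialization_vector = urandom(16)     # one AES block

padder = PKCS7(128).padder()            # pad plaintext to the 128-bit block size
padded = padder.update(b'example blob contents') + padder.finalize()

cipher = Cipher(algorithms.AES(content_encryption_key),
                modes.CBC(initialization_vector),
                backend=default_backend())
encryptor = cipher.encryptor()
ciphertext = encryptor.update(padded) + encryptor.finalize()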
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - -def _generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return (content_encryption_key, initialization_vector, encryption_data) - -def _decrypt_blob(require_encryption, key_encryption_key, key_resolver, - response, start_offset, end_offset): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. 
- :rtype: bytes - ''' - _validate_not_none('response', response) - content = response.body - _validate_not_none('content', content) - - try: - encryption_data = _dict_to_encryption_data(loads(response.headers['x-ms-meta-encryptiondata'])) - except: - if require_encryption: - raise ValueError(_ERROR_DATA_NOT_ENCRYPTED) - else: - return content - - if not(encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) - - blob_type = response.headers['x-ms-blob-type'] - - iv = None - unpad = False - start_range, end_range = 0, len(content) - if 'content-range' in response.headers: - range = response.headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - range = range.split(' ') - - range = range[1].split('-') - start_range = int(range[0]) - range = range[1].split('/') - end_range = int(range[0]) - blob_size = int(range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size-1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset : len(content) - end_offset] - -def _get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/blob/_error.py b/azure/multiapi/storage/v2016_05_31/blob/_error.py deleted file mode 100644 index bf5ca2c..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/_error.py +++ /dev/null @@ -1,38 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \ - 'Invalid page blob size: {0}. ' + \ - 'The size must be aligned to a 512-byte boundary.' - -_ERROR_PAGE_BLOB_START_ALIGNMENT = \ - 'start_range must align with 512 page size' - -_ERROR_PAGE_BLOB_END_ALIGNMENT = \ - 'end_range must align with 512 page size' - -_ERROR_INVALID_BLOCK_ID = \ - 'All blocks in block list need to have valid block ids.' - -_ERROR_INVALID_LEASE_DURATION = \ - "lease_duration param needs to be between 15 and 60 or -1." 
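Circling back to _decrypt_blob in the encryption hunk above: for a full, non-ranged download the inverse operation is simply CBC decryption followed by PKCS7 unpadding (again a standalone sketch with placeholder names):

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.padding import PKCS7

def decrypt_full_blob(ciphertext, content_encryption_key, initialization_vector):
    # Undo the AES-CBC encryption, then strip the PKCS7 padding.
    cipher = Cipher(algorithms.AES(content_encryption_key),
                    modes.CBC(initialization_vector),
                    backend=default_backend())
    decryptor = cipher.decryptor()
    padded = decryptor.update(ciphertext) + decryptor.finalize()
    unpadder = PKCS7(128).unpadder()
    return unpadder.update(padded) + unpadder.finalize()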
- -_ERROR_INVALID_LEASE_BREAK_PERIOD = \ - "lease_break_period param needs to be between 0 and 60." - -_ERROR_NO_SINGLE_THREAD_CHUNKING = \ - 'To use blob chunk downloader more than 1 thread must be ' + \ - 'used since get_blob_to_bytes should be called for single threaded ' + \ - 'blob downloads.' \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/blob/_serialization.py b/azure/multiapi/storage/v2016_05_31/blob/_serialization.py deleted file mode 100644 index da80497..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/_serialization.py +++ /dev/null @@ -1,124 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from xml.sax.saxutils import escape as xml_escape -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from .._common_conversion import ( - _encode_base64, - _str, -) -from .._error import ( - _validate_not_none, - _ERROR_START_END_NEEDED_FOR_MD5, - _ERROR_RANGE_TOO_LARGE_FOR_MD5, -) -from ._error import ( - _ERROR_PAGE_BLOB_START_ALIGNMENT, - _ERROR_PAGE_BLOB_END_ALIGNMENT, - _ERROR_INVALID_BLOCK_ID, -) -from io import BytesIO - -def _get_path(container_name=None, blob_name=None): - ''' - Creates the path to access a blob resource. - - container_name: - Name of container. - blob_name: - The path to the blob. 
- ''' - if container_name and blob_name: - return '/{0}/{1}'.format( - _str(container_name), - _str(blob_name)) - elif container_name: - return '/{0}'.format(_str(container_name)) - else: - return '/' - - -def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if start_range_required == True or end_range is not None: - _validate_not_none('start_range', start_range) - if end_range_required == True: - _validate_not_none('end_range', end_range) - - # Page ranges must be 512 aligned - if align_to_page == True: - if start_range is not None and start_range % 512 != 0: - raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT) - if end_range is not None and end_range % 512 != 511: - raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT) - - # Format based on whether end_range is present - request.headers = request.headers or {} - if end_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - request.headers['x-ms-range'] = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - if check_content_md5 == True: - if start_range is None or end_range is None: - raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5) - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5) - - request.headers['x-ms-range-get-content-md5'] = 'true' - -def _convert_block_list_to_xml(block_id_list): - ''' - - - first-base64-encoded-block-id - second-base64-encoded-block-id - third-base64-encoded-block-id - - - Convert a block list to xml to send. - - block_id_list: - A list of BlobBlock containing the block ids and block state that are used in put_block_list. - Only get block from latest blocks. - ''' - if block_id_list is None: - return '' - - block_list_element = ETree.Element('BlockList'); - - # Enabled - for block in block_id_list: - if block.id is None: - raise ValueError(_ERROR_INVALID_BLOCK_ID) - id = xml_escape(_str(format(_encode_base64(block.id)))) - ETree.SubElement(block_list_element, block.state).text = id - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - # return xml value - return output \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/blob/_upload_chunking.py b/azure/multiapi/storage/v2016_05_31/blob/_upload_chunking.py deleted file mode 100644 index a8a8e20..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/_upload_chunking.py +++ /dev/null @@ -1,443 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
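Before the upload-chunking hunk below, a quick illustration of the kind of payload _convert_block_list_to_xml above produces for put_block_list (standard library only; the block ids are invented, and 'Latest' is used as the block state):

from base64 import b64encode
from io import BytesIO
from xml.etree import ElementTree as ETree

block_list = ETree.Element('BlockList')
for block_id in (b'block-000', b'block-001'):
    # Each child element names the block state; the text is the base64 block id.
    ETree.SubElement(block_list, 'Latest').text = b64encode(block_id).decode('utf-8')

stream = BytesIO()
ETree.ElementTree(block_list).write(stream, xml_declaration=True, encoding='utf-8')
payload = stream.getvalue()   # b"<?xml version='1.0' encoding='utf-8'?>\n<BlockList>..."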
-# -------------------------------------------------------------------------- -import sys -from threading import Lock -from time import sleep -from cryptography.hazmat.primitives.padding import PKCS7 -from .._common_conversion import _encode_base64 -from .._serialization import ( - url_quote, - _get_data_bytes_only, - _len_plus -) -from ._encryption import( - _get_blob_encryptor_and_padder, -) -from azure.common import ( - AzureHttpError, -) -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from .models import BlobBlock -from math import ceil -from .._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM - -def _upload_blob_chunks(blob_service, container_name, blob_name, - blob_size, block_size, stream, max_connections, - progress_callback, validate_content, lease_id, uploader_class, - maxsize_condition=None, if_match=None, timeout=None, - content_encryption_key=None, initialization_vector=None): - - encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector, - uploader_class is not _PageBlobChunkUploader) - - uploader = uploader_class( - blob_service, - container_name, - blob_name, - blob_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - lease_id, - timeout, - encryptor, - padder - ) - - uploader.maxsize_condition = maxsize_condition - - # ETag matching does not work with parallelism as a ranged upload may start - # before the previous finishes and provides an etag - uploader.if_match = if_match if not max_connections > 1 else None - - if progress_callback is not None: - progress_callback(0, blob_size) - - if max_connections > 1: - import concurrent.futures - from threading import BoundedSemaphore - - ''' - Ensures we bound the chunking so we only buffer and submit 'max_connections' amount of work items to the executor. - This is necessary as the executor queue will keep accepting submitted work items, which results in buffering all the blocks if - the max_connections + 1 ensures the next chunk is already buffered and ready for when the worker thread is available. - ''' - chunk_throttler = BoundedSemaphore(max_connections + 1) - - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - futures = [] - running_futures = [] - - # Check for exceptions and fail fast. - for chunk in uploader.get_chunk_streams(): - for f in running_futures: - if f.done(): - if f.exception(): - raise f.exception() - else: - running_futures.remove(f) - - chunk_throttler.acquire() - future = executor.submit(uploader.process_chunk, chunk) - - # Calls callback upon completion (even if the callback was added after the Future task is done). - future.add_done_callback(lambda x: chunk_throttler.release()) - futures.append(future) - running_futures.append(future) - - # result() will wait until completion and also raise any exceptions that may have been set. 
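The semaphore-plus-callback pattern above is worth isolating, since it is what keeps memory bounded during parallel uploads. Stripped of the storage specifics it is roughly this hypothetical helper:

import concurrent.futures
from threading import BoundedSemaphore

def run_throttled(work_items, worker, max_connections=2):
    # Allow at most max_connections + 1 items to be buffered/submitted at once;
    # each completed future releases a slot for the next chunk.
    throttler = BoundedSemaphore(max_connections + 1)
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
        for item in work_items:
            throttler.acquire()
            future = executor.submit(worker, item)
            future.add_done_callback(lambda _: throttler.release())
            futures.append(future)
    return [f.result() for f in futures]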
- range_ids = [f.result() for f in futures] - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - - return range_ids - -def _upload_blob_substream_blocks(blob_service, container_name, blob_name, - blob_size, block_size, stream, max_connections, - progress_callback, validate_content, lease_id, uploader_class, - maxsize_condition=None, if_match=None, timeout=None): - - uploader = uploader_class( - blob_service, - container_name, - blob_name, - blob_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - lease_id, - timeout, - None, - None - ) - - uploader.maxsize_condition = maxsize_condition - - # ETag matching does not work with parallelism as a ranged upload may start - # before the previous finishes and provides an etag - uploader.if_match = if_match if not max_connections > 1 else None - - if progress_callback is not None: - progress_callback(0, blob_size) - - if max_connections > 1: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks())) - else: - range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()] - - return range_ids - -class _BlobChunkUploader(object): - def __init__(self, blob_service, container_name, blob_name, blob_size, - chunk_size, stream, parallel, progress_callback, - validate_content, lease_id, timeout, encryptor, padder): - self.blob_service = blob_service - self.container_name = container_name - self.blob_name = blob_name - self.blob_size = blob_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - self.progress_callback = progress_callback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - self.validate_content = validate_content - self.lease_id = lease_id - self.timeout = timeout - self.encryptor = encryptor - self.padder = padder - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.blob_size: - read_size = min(self.chunk_size-len(data), self.blob_size - (index + len(data))) - temp = self.stream.read(read_size) - temp = _get_data_bytes_only('temp', temp) - data += temp - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. 
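Reduced to its essentials (and ignoring encryption, padding and the known-size bookkeeping), the chunking generator above behaves like this hypothetical helper:

from io import BytesIO

def iter_chunks(stream, chunk_size):
    # Yield (offset, chunk) pairs until the stream is exhausted.
    index = 0
    while True:
        data = stream.read(chunk_size)
        if not data:
            break
        yield index, BytesIO(data)
        index += len(data)

# e.g. [offset for offset, _ in iter_chunks(BytesIO(b'x' * 10), 4)] == [0, 4, 8]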
- if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, BytesIO(data) - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if len(data) > 0: - yield index, BytesIO(data) - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1].read() - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_callback is not None: - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - else: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.blob_size) - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.blob_size - - if blob_length is None: - blob_length = _len_plus(self.stream) - if blob_length is None: - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream')) - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - yield ('BlockId{}'.format("%05d" % i), - _SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size, - lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - -class _BlockBlobChunkUploader(_BlobChunkUploader): - def _upload_chunk(self, chunk_offset, chunk_data): - block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset))) - self.blob_service._put_block( - self.container_name, - self.blob_name, - chunk_data, - block_id, - validate_content=self.validate_content, - lease_id=self.lease_id, - timeout=self.timeout, - ) - return BlobBlock(block_id) - - def _upload_substream_block(self, block_id, block_stream): - try: - self.blob_service._put_block( - self.container_name, - self.blob_name, - block_stream, - block_id, - validate_content=self.validate_content, - lease_id=self.lease_id, - timeout=self.timeout, - ) - finally: - block_stream.close() - return BlobBlock(block_id) - - -class _PageBlobChunkUploader(_BlobChunkUploader): - def _upload_chunk(self, chunk_start, chunk_data): - chunk_end = chunk_start + len(chunk_data) - 1 - resp = self.blob_service._update_page( - self.container_name, - self.blob_name, - chunk_data, - chunk_start, - chunk_end, - validate_content=self.validate_content, - lease_id=self.lease_id, - if_match=self.if_match, - timeout=self.timeout, - ) - - if not self.parallel: - self.if_match = resp.etag - -class _AppendBlobChunkUploader(_BlobChunkUploader): - def _upload_chunk(self, chunk_offset, chunk_data): - if not hasattr(self, 'current_length'): - resp = self.blob_service.append_block( - 
self.container_name, - self.blob_name, - chunk_data, - validate_content=self.validate_content, - lease_id=self.lease_id, - maxsize_condition=self.maxsize_condition, - timeout=self.timeout, - ) - - self.current_length = resp.append_offset - else: - resp = self.blob_service.append_block( - self.container_name, - self.blob_name, - chunk_data, - validate_content=self.validate_content, - lease_id=self.lease_id, - maxsize_condition=self.maxsize_condition, - appendpos_condition=self.current_length + chunk_offset, - timeout=self.timeout, - ) - -class _SubStream(IOBase): - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._count = 0 - self._buffer = BytesIO() - self._read_buffer_size = 4 * 1024 * 1024 - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, n): - if self.closed: - raise ValueError("Stream is closed.") - - # adjust if out of bounds - if n + self._position >= self._length: - n = self._length - self._position - - # return fast - if n == 0 or self._buffer.closed: - return b'' - - # attempt first read from the read buffer - read_buffer = self._buffer.read(n) - bytes_read = len(read_buffer) - bytes_remaining = n - bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # lock is only defined if max_connections > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the substream - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. 
- if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(self._read_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(self._read_buffer_size) - - if buffer_from_stream: - self._buffer = BytesIO(buffer_from_stream) - second_read_buffer = self._buffer.read(bytes_remaining) - bytes_read += len(second_read_buffer) - read_buffer += second_read_buffer - - self._position += bytes_read - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - startIndex = 0 - elif whence is SEEK_CUR: - startIndex = self._position - elif whence is SEEK_END: - startIndex = self._length - offset = - offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = startIndex + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/blob/appendblobservice.py b/azure/multiapi/storage/v2016_05_31/blob/appendblobservice.py deleted file mode 100644 index 7c17136..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/appendblobservice.py +++ /dev/null @@ -1,541 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from .._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, -) -from .._common_conversion import ( - _to_str, - _int_to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from .._serialization import ( - _get_data_bytes_only, - _add_metadata_headers, -) -from .._http import HTTPRequest -from ._upload_chunking import ( - _AppendBlobChunkUploader, - _upload_blob_chunks, -) -from .models import _BlobTypes -from .._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ._serialization import ( - _get_path, -) -from ._deserialization import ( - _parse_append_block, - _parse_base_properties, -) -from .baseblobservice import BaseBlobService -from os import path -import sys -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - - -class AppendBlobService(BaseBlobService): - ''' - An append blob is comprised of blocks and is optimized for append operations. - When you modify an append blob, blocks are added to the end of the blob only, - via the append_block operation. Updating or deleting of existing blocks is not - supported. 
Unlike a block blob, an append blob does not expose its block IDs. - - Each block in an append blob can be a different size, up to a maximum of 4 MB, - and an append blob can include up to 50,000 blocks. The maximum size of an - append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks). - - :ivar int MAX_BLOCK_SIZE: - The size of the blocks put by append_blob_from_* methods. Smaller blocks - may be put if there is less data provided. The maximum block size the service - supports is 4MB. - ''' - MAX_BLOCK_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - self.blob_type = _BlobTypes.AppendBlob - super(AppendBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout) - - def create_blob(self, container_name, blob_name, content_settings=None, - metadata=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Creates a blob or overrides an existing blob. Use if_match=* to - prevent overriding an existing blob. - - See create_blob_from_* for high level - functions that handle the creation and upload of large blobs with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. 
- :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to - perform the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - return self._perform_request(request, _parse_base_properties) - - def append_block(self, container_name, blob_name, block, - validate_content=False, maxsize_condition=None, - appendpos_condition=None, - lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Commits a new block of data to the end of an existing append blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param bytes block: - Content of the block in bytes. - :param bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. 
Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: - ETag, last modified, append offset, and committed block count - properties for the updated Append Blob - :rtype: :class:`~azure.storage.blob.models.AppendBlockProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block', block) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'appendblock', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-condition-maxsize': _to_str(maxsize_condition), - 'x-ms-blob-condition-appendpos': _to_str(appendpos_condition), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - request.body = _get_data_bytes_only('block', block) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_append_block) - - #----Convenience APIs---------------------------------------------- - - def append_blob_from_path( - self, container_name, blob_name, file_path, validate_content=False, - maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None): - ''' - Appends to the content of an existing blob from a file path, with automatic - chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
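The maxsize_condition and appendpos_condition headers documented above are what make chunked appends safe: the chunk uploader passes appendpos_condition=current_length + chunk_offset so a retried block cannot be committed twice, and maxsize_condition caps the final blob size. A hedged usage sketch against this legacy client (account, container, and blob names are placeholders; assumes a pre-1.0.0 install of this package and valid credentials):

from azure.multiapi.storage.v2016_05_31.blob.appendblobservice import AppendBlobService

service = AppendBlobService(account_name='myaccount', account_key='<key>')
service.create_blob('mycontainer', 'log.txt')   # create (or reset) the append blob first

# Only append if the blob currently ends at offset 0, and never let it exceed 1 MiB.
resp = service.append_block(
    'mycontainer', 'log.txt', b'first line\n',
    appendpos_condition=0,              # 412 Precondition Failed if something appended first
    maxsize_condition=1024 * 1024,      # 412 Precondition Failed once the blob would exceed 1 MiB
)
print(resp.append_offset)               # offset at which the block was committed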
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - self.append_blob_from_stream( - container_name, - blob_name, - stream, - count=count, - validate_content=validate_content, - maxsize_condition=maxsize_condition, - progress_callback=progress_callback, - lease_id=lease_id, - timeout=timeout) - - def append_blob_from_bytes( - self, container_name, blob_name, blob, index=0, count=None, - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None): - ''' - Appends to the content of an existing blob from an array of bytes, with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_not_none('index', index) - _validate_type_bytes('blob', blob) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - self.append_blob_from_stream( - container_name, - blob_name, - stream, - count=count, - validate_content=validate_content, - maxsize_condition=maxsize_condition, - lease_id=lease_id, - progress_callback=progress_callback, - timeout=timeout) - - def append_blob_from_text( - self, container_name, blob_name, text, encoding='utf-8', - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None): - ''' - Appends to the content of an existing blob from str/unicode, with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str text: - Text to upload to the blob. - :param str encoding: - Python encoding to use to convert the text to bytes. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('text', text) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - self.append_blob_from_bytes( - container_name, - blob_name, - text, - index=0, - count=len(text), - validate_content=validate_content, - maxsize_condition=maxsize_condition, - lease_id=lease_id, - progress_callback=progress_callback, - timeout=timeout) - - def append_blob_from_stream( - self, container_name, blob_name, stream, count=None, - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None): - ''' - Appends to the content of an existing blob from a file/stream, with - automatic chunking and progress notifications. 
- - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param io.IOBase stream: - Opened stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=1, # upload not easily parallelizable - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_AppendBlobChunkUploader, - maxsize_condition=maxsize_condition, - timeout=timeout - ) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/blob/baseblobservice.py b/azure/multiapi/storage/v2016_05_31/blob/baseblobservice.py deleted file mode 100644 index 93007a7..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/baseblobservice.py +++ /dev/null @@ -1,3167 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
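Since appendblobservice.py is removed above, it is worth recording roughly how its convenience layer was driven: append_blob_from_path/bytes/text/stream all cut the input into MAX_BLOCK_SIZE (4 MB) blocks and feed them through _upload_blob_chunks with max_connections=1. A hedged sketch (placeholder names, pre-1.0.0 package assumed):

from azure.multiapi.storage.v2016_05_31.blob.appendblobservice import AppendBlobService

def report(current, total):
    # progress_callback signature documented above: func(current, total)
    print('appended {} of {} bytes'.format(current, total))

service = AppendBlobService(account_name='myaccount', account_key='<key>')
service.create_blob('mycontainer', 'audit.log')           # blob must exist before appending
service.append_blob_from_text(
    'mycontainer', 'audit.log', u'first entry\n',
    encoding='utf-8',
    maxsize_condition=64 * 1024 * 1024,                    # keep the log under 64 MiB
    progress_callback=report,
)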
-#-------------------------------------------------------------------------- -from azure.common import AzureHttpError -from .._error import AzureException -from .._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _validate_decryption_required, - _validate_access_policies, - _validate_content_match, - _ERROR_PARALLEL_NOT_SEEKABLE, - _ERROR_DECRYPTION_FAILURE, -) -from ._error import ( - _ERROR_INVALID_LEASE_DURATION, - _ERROR_INVALID_LEASE_BREAK_PERIOD, -) -from .._common_conversion import ( - _int_to_str, - _to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from abc import ABCMeta -from .._serialization import ( - _get_request_body, - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, - _add_metadata_headers, -) -from .._http import HTTPRequest -from ._download_chunking import _download_blob_chunks -from ..models import ( - Services, - ListGenerator, - _OperationContext, -) -from .models import ( - Blob, - BlobProperties, - _LeaseActions, - ContainerPermissions, - BlobPermissions, - Container, - ContainerProperties, -) -from .._auth import ( - _StorageSASAuthentication, - _StorageSharedKeyAuthentication, - _StorageNoAuthentication, -) -from .._connection import _ServiceParameters -from .._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from .._deserialization import ( - _convert_xml_to_service_properties, - _get_download_size, - _parse_metadata, - _parse_properties, - _convert_xml_to_service_stats, - _parse_length_from_content_range, -) -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from ._deserialization import ( - _convert_xml_to_containers, - _parse_blob, - _convert_xml_to_blob_list, - _parse_container, - _parse_snapshot_blob, - _parse_lease, - _convert_xml_to_signed_identifiers_and_access, - _parse_base_properties, -) -from ..sharedaccesssignature import ( - SharedAccessSignature, -) -from ..storageclient import StorageClient -import sys -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - -class BaseBlobService(StorageClient): - - ''' - This is the main class managing Blob resources. - - The Blob service stores text and binary data as blobs in the cloud. - The Blob service offers the following three resources: the storage account, - containers, and blobs. Within your storage account, containers provide a - way to organize sets of blobs. For more information please see: - https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx - - :ivar int MAX_SINGLE_GET_SIZE: - The size of the first range get performed by get_blob_to_* methods if - max_connections is greater than 1. Less data will be returned if the - blob is smaller than this. - :ivar int MAX_CHUNK_GET_SIZE: - The size of subsequent range gets performed by get_blob_to_* methods if - max_connections is greater than 1 and the blob is larger than MAX_SINGLE_GET_SIZE. - Less data will be returned if the remainder of the blob is smaller than - this. If this is set to larger than 4MB, content_validation will throw an - error if enabled. However, if content_validation is not desired a size - greater than 4MB may be optimal. Setting this below 4MB is not recommended. - :ivar object key_encryption_key: - The key-encryption-key optionally provided by the user. If provided, will be used to - encrypt/decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR the resolver must be provided. 
- If both are provided, the resolver will take precedence. - Must implement the following methods for APIs requiring encryption: - wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - Must implement the following methods for APIs requiring decryption: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :ivar function key_resolver_function(kid): - A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR - the resolver must be provided. If both are provided, the resolver will take precedence. - It uses the kid string to return a key-encryption-key implementing the interface defined above. - :ivar bool require_encryption: - A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and - successfully read from the queue are/were encrypted while on the server. If this flag is set, all required - parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver. - ''' - - __metaclass__ = ABCMeta - MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024 - MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. 
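The key_encryption_key contract spelled out above (wrap_key, get_key_wrap_algorithm, and get_kid for encryption; unwrap_key and get_kid for decryption) is duck-typed, so any object with those methods can be supplied. A toy illustration of the interface shape only; the XOR "wrap" below is not a real key-wrapping algorithm and the class is not part of the original module:

class DemoKeyWrapper:
    """Toy key-encryption-key matching the duck-typed interface described above."""

    def __init__(self, kid, secret):
        self._kid = kid
        self._secret = secret

    def wrap_key(self, key):
        # Real code would use RSA-OAEP or AES key wrap; XOR is purely illustrative.
        pad = self._secret * (len(key) // len(self._secret) + 1)
        return bytes(b ^ s for b, s in zip(key, pad))

    def unwrap_key(self, wrapped_key, algorithm):
        assert algorithm == self.get_key_wrap_algorithm()
        return self.wrap_key(wrapped_key)          # XOR is its own inverse

    def get_key_wrap_algorithm(self):
        return 'XOR-DEMO'

    def get_kid(self):
        return self._kid

kek = DemoKeyWrapper('demo-kid', b'0123456789abcdef')
wrapped = kek.wrap_key(b'content-encryption-key!!')
assert kek.unwrap_key(wrapped, 'XOR-DEMO') == b'content-encryption-key!!'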
See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - service_params = _ServiceParameters.get_service_parameters( - 'blob', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=custom_domain, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(BaseBlobService, self).__init__(service_params) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - else: - self.authentication = _StorageNoAuthentication() - - self.require_encryption = False - self.key_encryption_key = None - self.key_resolver_function = None - - def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None, snapshot=None): - ''' - Creates the url to access a blob. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str protocol: - Protocol to use: 'http' or 'https'. If not specified, uses the - protocol specified when BaseBlobService was initialized. - :param str sas_token: - Shared access signature token created with - generate_shared_access_signature. - :param str snapshot: - An string value that uniquely identifies the snapshot. The value of - this query parameter indicates the snapshot version. - :return: blob access URL. - :rtype: str - ''' - - url = '{}://{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - container_name, - blob_name, - ) - - if snapshot and sas_token: - url = '{}?snapshot={}&{}'.format(url, snapshot, sas_token) - elif snapshot: - url = '{}?snapshot={}'.format(url, snapshot) - elif sas_token: - url = '{}?{}'.format(url, sas_token) - - return url - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the blob service. - Use the returned signature with the sas_token parameter of any BlobService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. 
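make_blob_url above is pure string assembly over the primary endpoint: protocol defaults to the service's own, and snapshot and sas_token are appended as query components in that order. A hedged sketch of the resulting shapes (account, container, and token values are placeholders; the endpoint shown in the comments assumes the default core.windows.net suffix):

from azure.multiapi.storage.v2016_05_31.blob.appendblobservice import AppendBlobService

service = AppendBlobService(account_name='myaccount', account_key='<key>')

# Typically: https://myaccount.blob.core.windows.net/mycontainer/readme.txt
print(service.make_blob_url('mycontainer', 'readme.txt'))

# Same URL with a snapshot selector and a SAS token appended, in that order:
# https://.../readme.txt?snapshot=2016-05-31T00:00:00.0000000Z&<sas>
print(service.make_blob_url('mycontainer', 'readme.txt',
                            snapshot='2016-05-31T00:00:00.0000000Z',
                            sas_token='sv=2015-12-11&sig=...'))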
Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.BLOB, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_container_shared_access_signature(self, container_name, - permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. 
- :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_container( - container_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def generate_blob_shared_access_signature( - self, container_name, blob_name, permission=None, - expiry=None, start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the blob. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param BlobPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_container_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. 
- For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_blob( - container_name, - blob_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def list_containers(self, prefix=None, num_results=None, include_metadata=False, - marker=None, timeout=None): - ''' - Returns a generator to list the containers under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - containers, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only containers whose names - begin with the specified prefix. - :param int num_results: - Specifies the maximum number of containers to return. A single list - request may return up to 1000 contianers and potentially a continuation - token which should be followed to get additional resutls. - :param bool include_metadata: - Specifies that container metadata be returned in the response. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. 
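generate_blob_shared_access_signature above signs locally with the account key, so it pairs naturally with make_blob_url: generate the token, then hand it back as sas_token. A hedged sketch; BlobPermissions comes from the blob models module imported above, and the READ constant is assumed to exist as in the upstream azure-storage package of the same vintage:

from datetime import datetime, timedelta
from azure.multiapi.storage.v2016_05_31.blob.appendblobservice import AppendBlobService
from azure.multiapi.storage.v2016_05_31.blob.models import BlobPermissions

service = AppendBlobService(account_name='myaccount', account_key='<key>')

# Read-only token for a single blob, valid for one hour and only over https.
token = service.generate_blob_shared_access_signature(
    'mycontainer', 'report.csv',
    permission=BlobPermissions.READ,
    expiry=datetime.utcnow() + timedelta(hours=1),
    protocol='https',
)
print(service.make_blob_url('mycontainer', 'report.csv', sas_token=token))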
- ''' - include = 'metadata' if include_metadata else None - operation_context = _OperationContext(location_lock=True) - kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, - 'include': include, 'timeout': timeout, '_context': operation_context} - resp = self._list_containers(**kwargs) - - return ListGenerator(resp, self._list_containers, (), kwargs) - - - def _list_containers(self, prefix=None, marker=None, max_results=None, - include=None, timeout=None, _context=None): - ''' - Returns a list of the containers under the specified account. - - :param str prefix: - Filters the results to return only containers whose names - begin with the specified prefix. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of containers to return. A single list - request may return up to 1000 contianers and potentially a continuation - token which should be followed to get additional resutls. - :param str include: - Include this parameter to specify that the container's - metadata be returned as part of the response body. set this - parameter to string 'metadata' to get container's metadata. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_containers, operation_context=_context) - - def create_container(self, container_name, metadata=None, - public_access=None, fail_on_exist=False, timeout=None): - ''' - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails if - fail_on_exist is True. - - :param str container_name: - Name of container to create. - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: a dict mapping str to str - :param public_access: - Possible values include: container, blob. - :type public_access: - One of the values listed in the :class:`~azure.storage.blob.models.PublicAccess` enum. - :param bool fail_on_exist: - Specify whether to throw an exception when the container exists. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if container is created, False if container already exists. 
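list_containers above returns a ListGenerator that lazily follows continuation tokens; it only stops early when num_results is reached, in which case next_marker is populated for resuming. A hedged usage sketch (container objects are assumed to expose name and metadata attributes as in the upstream SDK):

from azure.multiapi.storage.v2016_05_31.blob.appendblobservice import AppendBlobService

service = AppendBlobService(account_name='myaccount', account_key='<key>')

# Lazily enumerate every container whose name starts with 'logs-'.
for container in service.list_containers(prefix='logs-', include_metadata=True):
    print(container.name, container.metadata)

# Page manually: take the first 10, then resume later from next_marker.
first_page = service.list_containers(num_results=10)
names = [c.name for c in first_page]
rest = service.list_containers(num_results=10, marker=first_page.next_marker)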
- :rtype: bool - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-public-access': _to_str(public_access) - } - _add_metadata_headers(metadata, request) - - if not fail_on_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_container_properties(self, container_name, lease_id=None, timeout=None): - ''' - Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :param str container_name: - Name of existing container. - :param str lease_id: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: properties for the specified container within a container object. - :rtype: :class:`~azure.storage.blob.models.Container` - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _parse_container, [container_name]) - - def get_container_metadata(self, container_name, lease_id=None, timeout=None): - ''' - Returns all user-defined metadata for the specified container. - - :param str container_name: - Name of existing container. - :param str lease_id: - If specified, get_container_metadata only succeeds if the - container's lease is active and matches this ID. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the container metadata name, value pairs. - :rtype: a dict mapping str to str - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _parse_metadata) - - def set_container_metadata(self, container_name, metadata=None, - lease_id=None, if_modified_since=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param str container_name: - Name of existing container. - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: a dict mapping str to str - :param str lease_id: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :param datetime if_modified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Container - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'x-ms-lease-id': _to_str(lease_id), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_base_properties) - - def get_container_acl(self, container_name, lease_id=None, timeout=None): - ''' - Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :param str container_name: - Name of existing container. - :param lease_id: - If specified, get_container_acl only succeeds if the - container's lease is active and matches this ID. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A dictionary of access policies associated with the container. - :rtype: - dict of str to :class:`.AccessPolicy` and a public_access property - if public access is turned on - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _convert_xml_to_signed_identifiers_and_access) - - def set_container_acl(self, container_name, signed_identifiers=None, - public_access=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param str container_name: - Name of existing container. - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict of str to :class:`.AccessPolicy` - :param public_access: - Possible values include: container, blob. - :type public_access: - One of the values listed in the :class:`~azure.storage.blob.models.PublicAccess` enum. - :param str lease_id: - If specified, set_container_acl only succeeds if the - container's lease is active and matches this ID. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Container - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-public-access': _to_str(public_access), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'x-ms-lease-id': _to_str(lease_id), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - - return self._perform_request(request, _parse_base_properties) - - def delete_container(self, container_name, fail_not_exist=False, - lease_id=None, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :param str container_name: - Name of container to delete. - :param bool fail_not_exist: - Specify whether to throw an exception when the container doesn't - exist. - :param str lease_id: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if container is deleted, False container doesn't exist. 
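create_container and delete_container above both come in two flavors controlled by fail_on_exist / fail_not_exist: with the default False they swallow the "already exists" / "not found" error and return a bool, otherwise they raise. A short sketch of the idempotent pattern (placeholder names):

from azure.multiapi.storage.v2016_05_31.blob.appendblobservice import AppendBlobService

service = AppendBlobService(account_name='myaccount', account_key='<key>')

created = service.create_container('staging')        # False if it already existed
if created:
    print('container created')

deleted = service.delete_container('staging')        # False if it did not exist
print('deleted' if deleted else 'nothing to delete')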
- :rtype: bool - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - } - - if not fail_not_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def _lease_container_impl( - self, container_name, lease_action, lease_id, lease_duration, - lease_break_period, proposed_lease_id, if_modified_since, - if_unmodified_since, timeout): - ''' - Establishes and manages a lease on a container. - The Lease Container operation can be called in one of five modes - Acquire, to request a new lease - Renew, to renew an existing lease - Change, to change the ID of an existing lease - Release, to free the lease if it is no longer needed so that another - client may immediately acquire a lease against the container - Break, to end the lease but ensure that another client cannot acquire - a new lease until the current lease period has expired - - :param str container_name: - Name of existing container. - :param str lease_action: - Possible _LeaseActions values: acquire|renew|release|break|change - :param str lease_id: - Required if the container has an active lease. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. For backwards compatibility, the default is - 60, and the value is only used on an acquire operation. - :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param str proposed_lease_id: - Optional for Acquire, required for Change. Proposed lease ID, in a - GUID string format. The Blob service returns 400 (Invalid request) - if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - Response headers returned from the service call. - :rtype: a dict mapping str to str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('lease_action', lease_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'lease', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-lease-action': _to_str(lease_action), - 'x-ms-lease-duration': _to_str(lease_duration), - 'x-ms-lease-break-period': _to_str(lease_break_period), - 'x-ms-proposed-lease-id': _to_str(proposed_lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - } - - return self._perform_request(request, _parse_lease) - - def acquire_container_lease( - self, container_name, lease_duration=-1, proposed_lease_id=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param str container_name: - Name of existing container. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the newly created lease. - :return: str - ''' - _validate_not_none('lease_duration', lease_duration) - if lease_duration != -1 and\ - (lease_duration < 15 or lease_duration > 60): - raise ValueError(_ERROR_INVALID_LEASE_DURATION) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Acquire, - None, # lease_id - lease_duration, - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - timeout) - return lease['id'] - - def renew_container_lease( - self, container_name, lease_id, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Renews the lease. The lease can be renewed if the lease ID specified - matches that associated with the container. 
Note that - the lease may be renewed even if it has expired as long as the container - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the renewed lease. - :return: str - ''' - _validate_not_none('lease_id', lease_id) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Renew, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - return lease['id'] - - def release_container_lease( - self, container_name, lease_id, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Release the lease. The lease may be released if the lease_id specified matches - that associated with the container. Releasing the lease allows another client - to immediately acquire the lease for the container as soon as the release is complete. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('lease_id', lease_id) - - self._lease_container_impl(container_name, - _LeaseActions.Release, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - - def break_container_lease( - self, container_name, lease_break_period=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Break the lease, if the container has an active lease. Once a lease is - broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. 
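A short sketch of the container lease lifecycle (acquire, renew, release) built from the signatures above; the client setup and names are placeholder assumptions::

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    # A non-infinite lease must be between 15 and 60 seconds; -1 means infinite.
    lease_id = service.acquire_container_lease('logs', lease_duration=15)
    service.renew_container_lease('logs', lease_id)
    # Releasing the lease lets another client acquire one immediately.
    service.release_container_lease('logs', lease_id)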
When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param str container_name: - Name of existing container. - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :return: int - ''' - if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60): - raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Break, - None, # lease_id - None, # lease_duration - lease_break_period, - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - return lease['time'] - - def change_container_lease( - self, container_name, lease_id, proposed_lease_id, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Change the lease ID of an active lease. A change must include the current - lease ID and a new lease ID. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('lease_id', lease_id) - - self._lease_container_impl(container_name, - _LeaseActions.Change, - lease_id, - None, # lease_duration - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - timeout) - - def list_blobs(self, container_name, prefix=None, num_results=None, include=None, - delimiter=None, marker=None, timeout=None): - ''' - Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service and stop when all blobs have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - blobs, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str container_name: - Name of existing container. - :param str prefix: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param int num_results: - Specifies the maximum number of blobs to return, - including all :class:`BlobPrefix` elements. If the request does not specify - num_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting num_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param ~azure.storage.blob.models.Include include: - Specifies one or more additional datasets to include in the response. - :param str delimiter: - When the request includes this parameter, the operation - returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the - result list that acts as a placeholder for all blobs whose names begin - with the same substring up to the appearance of the delimiter character. - The delimiter may be a single character or a string. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - operation_context = _OperationContext(location_lock=True) - args = (container_name,) - kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, - 'include': include, 'delimiter': delimiter, 'timeout': timeout, - '_context': operation_context} - resp = self._list_blobs(*args, **kwargs) - - return ListGenerator(resp, self._list_blobs, args, kwargs) - - def _list_blobs(self, container_name, prefix=None, marker=None, - max_results=None, include=None, delimiter=None, timeout=None, - _context=None): - ''' - Returns the list of blobs under the specified container. - - :param str container_name: - Name of existing container. - :parm str prefix: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. 
The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of blobs to return, - including all :class:`BlobPrefix` elements. If the request does not specify - max_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting max_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param str include: - Specifies one or more datasets to include in the - response. To specify more than one of these options on the URI, - you must separate each option with a comma. Valid values are: - snapshots: - Specifies that snapshots should be included in the - enumeration. Snapshots are listed from oldest to newest in - the response. - metadata: - Specifies that blob metadata be returned in the response. - uncommittedblobs: - Specifies that blobs for which blocks have been uploaded, - but which have not been committed using Put Block List - (REST API), be included in the response. - copy: - Version 2012-02-12 and newer. Specifies that metadata - related to any current or previous Copy Blob operation - should be included in the response. - :param str delimiter: - When the request includes this parameter, the operation - returns a :class:`BlobPrefix` element in the response body that acts as a - placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character. The - delimiter may be a single character or a string. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'list', - 'prefix': _to_str(prefix), - 'delimiter': _to_str(delimiter), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_blob_list, operation_context=_context) - - def get_blob_service_stats(self, timeout=None): - ''' - Retrieves statistics related to replication for the Blob service. It is - only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. 
- :rtype: :class:`~azure.storage.models.ServiceStats` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(primary=False, secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_stats) - - def set_blob_service_properties( - self, logging=None, hour_metrics=None, minute_metrics=None, - cors=None, target_version=None, timeout=None): - ''' - Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. If an element (ex Logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param Logging logging: - Groups the Azure Analytics Logging settings. - :param Metrics hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :param Metrics minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list of :class:`CorsRule` - :param string target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors, target_version)) - - self._perform_request(request) - - def get_blob_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service properties. - :rtype: - :class:`~azure.storage.models.ServiceProperties` with an attached - target_version property - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def get_blob_properties( - self, container_name, blob_name, snapshot=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - Returns :class:`.Blob` with :class:`.BlobProperties` and a metadata dict. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
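A hedged sketch of the service-level calls above; the ``Logging``, ``Metrics``, and ``RetentionPolicy`` constructors are assumed to be the ones re-exported by this package, and the stats call only returns data when read-access geo-redundant replication is enabled::

    from azure.multiapi.storage.v2016_05_31 import Logging, Metrics, RetentionPolicy
    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    # Requires read-access geo-redundant replication on the account.
    stats = service.get_blob_service_stats()
    print(stats.geo_replication.status, stats.geo_replication.last_sync_time)

    # Arguments left as None (e.g. cors) keep the existing service settings.
    service.set_blob_service_properties(
        logging=Logging(read=True, write=True, delete=True,
                        retention_policy=RetentionPolicy(enabled=True, days=7)),
        hour_metrics=Metrics(enabled=True, include_apis=True,
                             retention_policy=RetentionPolicy(enabled=True, days=7)))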
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: a blob object including properties and metadata. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'HEAD' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_blob, [blob_name, snapshot]) - - def set_blob_properties( - self, container_name, blob_name, content_settings=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Sets system properties on the blob. If one property is set for the - content_settings, all properties will be overriden. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). 
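A small sketch of ``get_blob_properties`` (client, container, and blob names assumed)::

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    # Returns a Blob with properties and metadata but no content.
    blob = service.get_blob_properties('logs', 'app.log')
    print(blob.properties.blob_type,
          blob.properties.content_length,
          blob.properties.etag)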
Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id) - } - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - return self._perform_request(request, _parse_base_properties) - - def exists(self, container_name, blob_name=None, snapshot=None, timeout=None): - ''' - Returns a boolean indicating whether the container exists (if blob_name - is None), or otherwise a boolean indicating whether the blob exists. - - :param str container_name: - Name of a container. - :param str blob_name: - Name of a blob. If None, the container will be checked for existence. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the snapshot. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A boolean indicating whether the resource exists. - :rtype: bool - ''' - _validate_not_none('container_name', container_name) - try: - if blob_name is None: - self.get_container_properties(container_name, timeout=timeout) - else: - self.get_blob_properties(container_name, blob_name, snapshot=snapshot, timeout=timeout) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def _get_blob( - self, container_name, blob_name, snapshot=None, start_range=None, - end_range=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, - _context=None): - ''' - Downloads a blob's content, metadata, and properties. You can also - call this API to read a snapshot. You can specify a range if you don't - need to download the blob in its entirety. If no range is specified, - the full blob will be downloaded. - - See get_blob_to_* for high level functions that handle the download - of large blobs with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. 
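A sketch combining ``exists`` and ``set_blob_properties``; the ``ContentSettings`` import path and field values are assumptions::

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService
    from azure.multiapi.storage.v2016_05_31.blob.models import ContentSettings

    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    if service.exists('logs', 'app.log'):
        # Setting any ContentSettings field overrides all of them, so supply
        # every value that should be preserved.
        service.set_blob_properties(
            'logs', 'app.log',
            content_settings=ContentSettings(content_type='text/plain',
                                             cache_control='no-cache'))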
- Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - When this is set to True and specified together with the Range header, - the service returns the MD5 hash for the range, as long as the range - is less than or equal to 4 MB in size. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A Blob with content, properties, and metadata. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_decryption_required(self.require_encryption, - self.key_encryption_key, - self.key_resolver_function) - - start_offset, end_offset = 0,0 - if self.key_encryption_key is not None or self.key_resolver_function is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. 
- if start_range > 0: - start_offset += 16 - start_range -= 16 - - if end_range is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - check_content_md5=validate_content) - - return self._perform_request(request, _parse_blob, - [blob_name, snapshot, validate_content, self.require_encryption, - self.key_encryption_key, self.key_resolver_function, - start_offset, end_offset], - operation_context=_context) - - def get_blob_to_path( - self, container_name, blob_name, file_path, open_mode='wb', - snapshot=None, start_range=None, end_range=None, - validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Downloads a blob to a file path, with automatic chunking and progress - notifications. Returns an instance of :class:`Blob` with - properties and metadata. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str file_path: - Path of file to write out to. - :param str open_mode: - Mode to use when opening the file. Note that specifying append only - open_mode prevents parallel download. So, max_connections must be set - to 1 if this open_mode is used. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. 
- :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. 
- :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - _validate_not_none('open_mode', open_mode) - - if max_connections > 1 and 'a' in open_mode: - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - with open(file_path, open_mode) as stream: - blob = self.get_blob_to_stream( - container_name, - blob_name, - stream, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - return blob - - def get_blob_to_stream( - self, container_name, blob_name, stream, snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - - ''' - Downloads a blob to a stream, with automatic chunking and progress - notifications. Returns an instance of :class:`Blob` with - properties and metadata. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param io.IOBase stream: - Opened stream to write to. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. 
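A sketch of ``get_blob_to_path`` with a progress callback; the path, names, and client are assumptions::

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    def report(current, total):
        # current is the number of bytes transferred so far; total is the
        # blob size, if known.
        print('{0}/{1}'.format(current, total))

    blob = service.get_blob_to_path('logs', 'big.bin', '/tmp/big.bin',
                                    max_connections=4,
                                    progress_callback=report)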
This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - - # If the user explicitly sets max_connections to 1, do a single shot download - if max_connections == 1: - blob = self._get_blob(container_name, - blob_name, - snapshot, - start_range=start_range, - end_range=end_range, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - # Set the download size - download_size = blob.properties.content_length - - # If max_connections is greater than 1, do the first get to establish the - # size of the blob and get the first segment of data - else: - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. 
- first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE - - initial_request_start = start_range if start_range else 0 - - if end_range and end_range - start_range < first_get_size: - initial_request_end = end_range - else: - initial_request_end = initial_request_start + first_get_size - 1 - - # Send a context object to make sure we always retry to the initial location - operation_context = _OperationContext(location_lock=True) - try: - blob = self._get_blob(container_name, - blob_name, - snapshot, - start_range=initial_request_start, - end_range=initial_request_end, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - _context=operation_context) - - # Parse the total blob size and adjust the download size if ranges - # were specified - blob_size = _parse_length_from_content_range(blob.properties.content_range) - if end_range: - # Use the end_range unless it is over the end of the blob - download_size = min(blob_size, end_range - start_range + 1) - elif start_range: - download_size = blob_size - start_range - else: - download_size = blob_size - except AzureHttpError as ex: - if not start_range and ex.status_code == 416: - # Get range will fail on an empty blob. If the user did not - # request a range, do a regular get request in order to get - # any properties. - blob = self._get_blob(container_name, - blob_name, - snapshot, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - _context=operation_context) - - # Set the download size to empty - download_size = 0 - else: - raise ex - - # Mark the first progress chunk. If the blob is small or this is a single - # shot download, this is the only call - if progress_callback: - progress_callback(blob.properties.content_length, download_size) - - # Write the content to the user stream - # Clear blob content since output has been written to user stream - if blob.content is not None: - stream.write(blob.content) - blob.content = None - - # If the blob is small or single shot download was used, the download is - # complete at this point. If blob size is large, use parallel download. - if blob.properties.content_length != download_size: - # Lock on the etag. 
This can be overriden by the user by specifying '*' - if_match = if_match if if_match is not None else blob.properties.etag - - end_blob = blob_size - if end_range: - # Use the end_range unless it is over the end of the blob - end_blob = min(blob_size, end_range + 1) - - _download_blob_chunks( - self, - container_name, - blob_name, - snapshot, - download_size, - self.MAX_CHUNK_GET_SIZE, - first_get_size, - initial_request_end + 1, # start where the first download ended - end_blob, - stream, - max_connections, - progress_callback, - validate_content, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout, - operation_context - ) - - # Set the content length to the download size instead of the size of - # the last range - blob.properties.content_length = download_size - - # Overwrite the content range to the user requested range - blob.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, blob_size) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - blob.properties.content_md5 = None - - return blob - - def get_blob_to_bytes( - self, container_name, blob_name, snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Downloads a blob as an array of bytes, with automatic chunking and - progress notifications. Returns an instance of :class:`Blob` with - properties, metadata, and content. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. 
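A sketch of a ranged ``get_blob_to_stream`` download into an in-memory stream (client and names assumed)::

    import io

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    # start_range and end_range are inclusive, so this reads the first 512 bytes.
    stream = io.BytesIO()
    blob = service.get_blob_to_stream('logs', 'big.bin', stream,
                                      start_range=0, end_range=511)
    data = stream.getvalue()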
- :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - - stream = BytesIO() - blob = self.get_blob_to_stream( - container_name, - blob_name, - stream, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - blob.content = stream.getvalue() - return blob - - def get_blob_to_text( - self, container_name, blob_name, encoding='utf-8', snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Downloads a blob as unicode text, with automatic chunking and progress - notifications. 
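``get_blob_to_bytes`` is the in-memory variant of the same download path (client and names assumed)::

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    blob = service.get_blob_to_bytes('logs', 'app.log', validate_content=True)
    raw = blob.content  # bytes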
Returns an instance of :class:`Blob` with - properties, metadata, and content. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str encoding: - Python encoding to use when decoding the blob data. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('encoding', encoding) - - blob = self.get_blob_to_bytes(container_name, - blob_name, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - blob.content = blob.content.decode(encoding) - return blob - - def get_blob_metadata( - self, container_name, blob_name, snapshot=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Returns all user-defined metadata for the specified blob or snapshot. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque value that, - when present, specifies the blob snapshot to retrieve. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the blob metadata name, value pairs. 
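``get_blob_to_text`` decodes the downloaded bytes with the given encoding (client and names assumed)::

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    blob = service.get_blob_to_text('logs', 'app.log', encoding='utf-8')
    print(blob.content)  # decoded unicode text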
- :rtype: a dict mapping str to str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_metadata) - - def set_blob_metadata(self, container_name, blob_name, - metadata=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Sets user-defined metadata for the specified blob as one or more - name-value pairs. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: a dict mapping str to str - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. 
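# Illustrative usage sketch (added for clarity; not part of the original SDK
# source): round-tripping blob metadata with get_blob_metadata and
# set_blob_metadata as documented nearby. Every set_blob_metadata call
# replaces all existing metadata on the blob. Names below are placeholders.
from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

# Replace the blob's metadata with a single name/value pair.
service.set_blob_metadata('mycontainer', 'notes.txt', metadata={'category': 'docs'})

# Read it back as a plain dict of str to str.
metadata = service.get_blob_metadata('mycontainer', 'notes.txt')
print(metadata.get('category'))  # prints 'docs'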
- :return: ETag and last modified properties for the updated Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_base_properties) - - def _lease_blob_impl(self, container_name, blob_name, - lease_action, lease_id, - lease_duration, lease_break_period, - proposed_lease_id, if_modified_since, - if_unmodified_since, if_match, if_none_match, timeout=None): - ''' - Establishes and manages a lease on a blob for write and delete operations. - The Lease Blob operation can be called in one of five modes: - Acquire, to request a new lease. - Renew, to renew an existing lease. - Change, to change the ID of an existing lease. - Release, to free the lease if it is no longer needed so that another - client may immediately acquire a lease against the blob. - Break, to end the lease but ensure that another client cannot acquire - a new lease until the current lease period has expired. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_action: - Possible _LeaseActions acquire|renew|release|break|change - :param str lease_id: - Required if the blob has an active lease. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param str proposed_lease_id: - Optional for acquire, required for change. Proposed lease ID, in a - GUID string format. The Blob service returns 400 (Invalid request) - if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - Response headers returned from the service call. - :rtype: a dict mapping str to str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('lease_action', lease_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'lease', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-lease-action': _to_str(lease_action), - 'x-ms-lease-duration': _to_str(lease_duration), - 'x-ms-lease-break-period': _to_str(lease_break_period), - 'x-ms-proposed-lease-id': _to_str(proposed_lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_lease) - - def acquire_blob_lease(self, container_name, blob_name, - lease_duration=-1, - proposed_lease_id=None, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Requests a new lease. If the blob does not have an active lease, the Blob - service creates a lease on the blob and returns a new lease ID. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the newly created lease. - :return: str - ''' - _validate_not_none('lease_duration', lease_duration) - - if lease_duration != -1 and\ - (lease_duration < 15 or lease_duration > 60): - raise ValueError(_ERROR_INVALID_LEASE_DURATION) - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Acquire, - None, # lease_id - lease_duration, - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['id'] - - def renew_blob_lease(self, container_name, blob_name, - lease_id, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Renews the lease. The lease can be renewed if the lease ID specified on - the request matches that associated with the blob. Note that the lease may - be renewed even if it has expired as long as the blob has not been modified - or leased again since the expiration of that lease. When you renew a lease, - the lease duration clock resets. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the renewed lease. 
- :return: str - ''' - _validate_not_none('lease_id', lease_id) - - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Renew, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['id'] - - def release_blob_lease(self, container_name, blob_name, - lease_id, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Releases the lease. The lease may be released if the lease ID specified on the - request matches that associated with the blob. Releasing the lease allows another - client to immediately acquire the lease for the blob as soon as the release is complete. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('lease_id', lease_id) - - self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Release, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - def break_blob_lease(self, container_name, blob_name, - lease_break_period=None, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Breaks the lease, if the blob has an active lease. Once a lease is broken, - it cannot be renewed. Any authorized request can break the lease; the request - is not required to specify a matching lease ID. When a lease is broken, - the lease break period is allowed to elapse, during which time no lease operation - except break and release can be performed on the blob. When a lease is successfully - broken, the response indicates the interval in seconds until a new lease can be acquired. - - A lease that has been broken can also be released, in which case another client may - immediately acquire the lease on the blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. 
- :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :return: int - ''' - if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60): - raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD) - - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Break, - None, # lease_id - None, # lease_duration - lease_break_period, - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['time'] - - def change_blob_lease(self, container_name, blob_name, - lease_id, - proposed_lease_id, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Changes the lease ID of an active lease. A change must include the current - lease ID and a new lease ID. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Required if the blob has an active lease. - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
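# Illustrative usage sketch (added for clarity; not part of the original SDK
# source): a typical lease lifecycle built from the operations documented
# nearby (acquire, renew, break, release). Names are placeholders.
from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

# Acquire a 60-second lease; lease_duration must be -1 (infinite) or 15-60.
lease_id = service.acquire_blob_lease('mycontainer', 'notes.txt', lease_duration=60)

# While the lease is held, write operations must pass the lease ID.
service.set_blob_metadata('mycontainer', 'notes.txt',
                          metadata={'locked_by': 'worker-1'}, lease_id=lease_id)

# Reset the lease clock, then release so another client can acquire at once.
service.renew_blob_lease('mycontainer', 'notes.txt', lease_id)
service.release_blob_lease('mycontainer', 'notes.txt', lease_id)

# A lease held elsewhere can be broken without knowing its ID; the return
# value is the number of seconds until a new lease can be acquired.
seconds_left = service.break_blob_lease('mycontainer', 'notes.txt',
                                        lease_break_period=10)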
- :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Change, - lease_id, - None, # lease_duration - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - def snapshot_blob(self, container_name, blob_name, - metadata=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, lease_id=None, timeout=None): - ''' - Creates a read-only snapshot of a blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param metadata: - Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the - base blob metadata to the snapshot. If one or more name-value pairs - are specified, the snapshot is created with the specified metadata, - and metadata is not copied from the base blob. - :type metadata: a dict mapping str to str - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. 
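# Illustrative usage sketch (added for clarity; not part of the original SDK
# source): creating a read-only snapshot with snapshot_blob and reading it
# back through the snapshot parameter of a download call. Names are
# placeholders.
from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

snapshot = service.snapshot_blob('mycontainer', 'notes.txt')

# The returned Blob carries the opaque snapshot timestamp; passing it back
# retrieves that point-in-time version of the blob.
old_version = service.get_blob_to_bytes('mycontainer', 'notes.txt',
                                        snapshot=snapshot.snapshot)
print(len(old_version.content))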
- :return: snapshot properties - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'snapshot', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id) - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_snapshot_blob, [blob_name]) - - def copy_blob(self, container_name, blob_name, copy_source, - metadata=None, - source_if_modified_since=None, - source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=None, - destination_if_unmodified_since=None, - destination_if_match=None, - destination_if_none_match=None, - destination_lease_id=None, - source_lease_id=None, timeout=None): - ''' - Copies a blob asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call get_blob_properties on the destination - blob to check the status of the copy operation. The final blob will be - committed when the copy completes. - - :param str container_name: - Name of the destination container. The container must exist. - :param str blob_name: - Name of the destination blob. If the destination blob exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: A dict mapping str to str. - :param datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :param datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :param ETag source_if_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the source blob only if its ETag matches the value - specified. If the ETag values do not match, the Blob service returns - status code 412 (Precondition Failed). This header cannot be specified - if the source is an Azure File. - :param ETag source_if_none_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the blob only if its ETag does not match the value - specified. If the values are identical, the Blob service returns status - code 412 (Precondition Failed). This header cannot be specified if the - source is an Azure File. - :param datetime destination_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :param datetime destination_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :param ETag destination_if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - matches the ETag value for an existing destination blob. If the ETag for - the destination blob does not match the ETag specified for If-Match, the - Blob service returns status code 412 (Precondition Failed). 
- :param ETag destination_if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - does not match the ETag value for the destination blob. Specify the wildcard - character (*) to perform the operation only if the destination blob does not - exist. If the specified condition isn't met, the Blob service returns status - code 412 (Precondition Failed). - :param str destination_lease_id: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :param str source_lease_id: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.blob.models.CopyProperties` - ''' - return self._copy_blob(container_name, blob_name, copy_source, - metadata, - source_if_modified_since, source_if_unmodified_since, - source_if_match, source_if_none_match, - destination_if_modified_since, - destination_if_unmodified_since, - destination_if_match, - destination_if_none_match, - destination_lease_id, - source_lease_id, timeout, - False) - - def _copy_blob(self, container_name, blob_name, copy_source, - metadata=None, - source_if_modified_since=None, - source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=None, - destination_if_unmodified_since=None, - destination_if_match=None, - destination_if_none_match=None, - destination_lease_id=None, - source_lease_id=None, timeout=None, - incremental_copy=False): - ''' - See copy_blob for more details. This helper method - allows for standard copies as well as incremental copies, which are only supported for page blobs. - :param bool incremental_copy: - If True, performs an incremental copy, which is supported only for page blobs; - otherwise a standard copy is performed.
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('copy_source', copy_source) - - if copy_source.startswith('/'): - # Backwards compatibility for earlier versions of the SDK where - # the copy source can be in the following formats: - # - Blob in named container: - # /accountName/containerName/blobName - # - Snapshot in named container: - # /accountName/containerName/blobName?snapshot= - # - Blob in root container: - # /accountName/blobName - # - Snapshot in root container: - # /accountName/blobName?snapshot= - account, _, source =\ - copy_source.partition('/')[2].partition('/') - copy_source = self.protocol + '://' + \ - self.primary_endpoint + '/' + source - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - - if incremental_copy: - request.query = { - 'comp': 'incrementalcopy', - 'timeout': _int_to_str(timeout), - } - else: - request.query = {'timeout': _int_to_str(timeout)} - - request.headers = { - 'x-ms-copy-source': _to_str(copy_source), - 'x-ms-source-if-modified-since': _to_str(source_if_modified_since), - 'x-ms-source-if-unmodified-since': _to_str(source_if_unmodified_since), - 'x-ms-source-if-match': _to_str(source_if_match), - 'x-ms-source-if-none-match': _to_str(source_if_none_match), - 'If-Modified-Since': _datetime_to_utc_string(destination_if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(destination_if_unmodified_since), - 'If-Match': _to_str(destination_if_match), - 'If-None-Match': _to_str(destination_if_none_match), - 'x-ms-lease-id': _to_str(destination_lease_id), - 'x-ms-source-lease-id': _to_str(source_lease_id) - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_properties, [BlobProperties]).copy - - def abort_copy_blob(self, container_name, blob_name, copy_id, - lease_id=None, timeout=None): - ''' - Aborts a pending copy_blob operation, and leaves a destination blob - with zero length and full metadata. - - :param str container_name: - Name of destination container. - :param str blob_name: - Name of destination blob. - :param str copy_id: - Copy identifier provided in the copy.id of the original - copy_blob operation. - :param str lease_id: - Required if the destination blob has an active infinite lease. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('copy_id', copy_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'copy', - 'copyid': _to_str(copy_id), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-copy-action': 'abort', - } - - self._perform_request(request) - - def delete_blob(self, container_name, blob_name, snapshot=None, - lease_id=None, delete_snapshots=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Marks the specified blob or snapshot for deletion. - The blob is later deleted during garbage collection. - - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the Delete - Blob operation. 
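# Illustrative usage sketch (added for clarity; not part of the original SDK
# source): starting an asynchronous copy with copy_blob, polling its status
# through get_blob_properties, and aborting it with abort_copy_blob if
# needed. The source URL and names are placeholders.
import time

from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

source_url = 'https://myaccount.blob.core.windows.net/mycontainer/source-blob'
copy = service.copy_blob('backups', 'source-blob-copy', source_url)

# The copy runs server-side on a best-effort basis; poll the destination
# blob's copy status until it leaves the 'pending' state.
destination = service.get_blob_properties('backups', 'source-blob-copy')
while destination.properties.copy.status == 'pending':
    time.sleep(5)
    destination = service.get_blob_properties('backups', 'source-blob-copy')

# A still-pending copy could instead be cancelled, which leaves a zero-length
# destination blob with full metadata:
# service.abort_copy_blob('backups', 'source-blob-copy', copy.id)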
- - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to delete. - :param str lease_id: - Required if the blob has an active lease. - :param delete_snapshots: - Required if the blob has associated snapshots. - :type delete_snapshots: - One of the values listed in the :class:`~azure.storage.blob.models.DeleteSnapshot` enum. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-delete-snapshots': _to_str(delete_snapshots), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout) - } - - self._perform_request(request) diff --git a/azure/multiapi/storage/v2016_05_31/blob/blockblobservice.py b/azure/multiapi/storage/v2016_05_31/blob/blockblobservice.py deleted file mode 100644 index 6e792e6..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/blockblobservice.py +++ /dev/null @@ -1,963 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
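# Illustrative usage sketch (added for clarity; not part of the original SDK
# source): deleting a blob together with its snapshots via delete_blob, as
# described above. The DeleteSnapshot enum is assumed to come from the blob
# models module of this same API version; names are placeholders.
from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService
from azure.multiapi.storage.v2016_05_31.blob.models import DeleteSnapshot

service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

# A blob that has snapshots cannot be deleted unless the snapshots are
# deleted with it (Include) or on their own (Only).
service.delete_blob('mycontainer', 'notes.txt',
                    delete_snapshots=DeleteSnapshot.Include)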
-#-------------------------------------------------------------------------- -from .._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_required, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, - _ERROR_VALUE_SHOULD_BE_STREAM -) -from .._common_conversion import ( - _encode_base64, - _to_str, - _int_to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from .._serialization import ( - _get_request_body, - _get_data_bytes_only, - _get_data_bytes_or_stream_only, - _add_metadata_headers, -) -from .._http import HTTPRequest -from ._upload_chunking import ( - _BlockBlobChunkUploader, - _upload_blob_chunks, - _upload_blob_substream_blocks, -) -from .models import ( - _BlobTypes, -) -from .._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ._serialization import ( - _convert_block_list_to_xml, - _get_path, -) -from .._serialization import ( - _len_plus -) -from ._deserialization import ( - _convert_xml_to_block_list, - _parse_base_properties, -) -from ._encryption import( - _encrypt_blob, - _generate_blob_encryption_data, -) -from .baseblobservice import BaseBlobService -from os import( - path, -) -import sys -from io import ( - BytesIO, - IOBase -) - -class BlockBlobService(BaseBlobService): - ''' - Block blobs let you upload large blobs efficiently. Block blobs are comprised - of blocks, each of which is identified by a block ID. You create or modify a - block blob by writing a set of blocks and committing them by their block IDs. - Each block can be a different size, up to a maximum of 4 MB, and a block blob - can include up to 50,000 blocks. The maximum size of a block blob is therefore - slightly more than 195 GB (4 MB X 50,000 blocks). If you are writing a block - blob that is no more than 64 MB in size, you can upload it in its entirety with - a single write operation; see create_blob_from_bytes. - - :ivar int MAX_SINGLE_PUT_SIZE: - The largest size upload supported in a single put call. This is used by - the create_blob_from_* methods if the content length is known and is less - than this value. - :ivar int MAX_BLOCK_SIZE: - The size of the blocks put by create_blob_from_* methods if the content - length is unknown or is larger than MAX_SINGLE_PUT_SIZE. Smaller blocks - may be put. The maximum block size the service supports is 100MB. - :ivar int MIN_LARGE_BLOCK_UPLOAD_THRESHOLD: - The minimum block size at which the the memory-optimized, block upload - algorithm is considered. This algorithm is only applicable to the create_blob_from_file and - create_blob_from_stream methods and will prevent the full buffering of blocks. - In addition to the block size, ContentMD5 validation and Encryption must be disabled as - these options require the blocks to be buffered. - ''' - - MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024 - MAX_BLOCK_SIZE = 4 * 1024 * 1024 - MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 4 * 1024 * 1024 + 1 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. 
- If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - self.blob_type = _BlobTypes.BlockBlob - super(BlockBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout) - - def put_block(self, container_name, blob_name, block, block_id, - validate_content=False, lease_id=None, timeout=None): - ''' - Creates a new block to be committed as part of a blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param block: Content of the block. - :type block: io.IOBase or bytes - Content of the block. - :param str block_id: - A valid Base64 string value that identifies the block. Prior to - encoding, the string must be less than or equal to 64 bytes in size. - For a given blob, the length of the value specified for the blockid - parameter must be the same size for each block. Note that the Base64 - string must be URL-encoded. - :param bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - self._put_block( - container_name, - blob_name, - block, - block_id, - validate_content=validate_content, - lease_id=lease_id, - timeout=timeout - ) - - def put_block_list( - self, container_name, blob_name, block_list, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Writes a blob by specifying the list of block IDs that make up the blob. 
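# Illustrative usage sketch (added for clarity; not part of the original SDK
# source): the BlockBlobService constructor documented above accepts shared
# key credentials, a SAS token, or a full connection string. All values are
# placeholders.
from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

# Shared key authentication.
service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

# SAS token authentication; no account key is required.
sas_service = BlockBlobService(account_name='myaccount', sas_token='<sas-token>')

# A connection string overrides the other parameters (except request_session).
cs_service = BlockBlobService(
    connection_string='DefaultEndpointsProtocol=https;AccountName=myaccount;'
                      'AccountKey=<account-key>;EndpointSuffix=core.windows.net')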
- In order to be written as part of a blob, a block must have been - successfully written to the server in a prior Put Block operation. - - You can call Put Block List to update a blob by uploading only those - blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from - the committed block list or from the uncommitted block list, or to commit - the most recently uploaded version of the block, whichever list it may - belong to. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param block_list: - A list of :class:`~azure.storage.blob.models.BlobBlock` containing the block ids and block state. - :type block_list: list of :class:`~azure.storage.blob.models.BlobBlock` - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash of the block list content. The storage - service checks the hash of the block list content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this check is associated with - the block list content, and not with the content of the blob itself. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds.
- :return: ETag and last modified properties for the updated Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._put_block_list( - container_name, - blob_name, - block_list, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def get_block_list(self, container_name, blob_name, snapshot=None, - block_list_type=None, lease_id=None, timeout=None): - ''' - Retrieves the list of blocks that have been uploaded as part of a - block blob. There are two block lists maintained for a blob: - Committed Block List: - The list of blocks that have been successfully committed to a - given blob with Put Block List. - Uncommitted Block List: - The list of blocks that have been uploaded for a blob using - Put Block, but that have not yet been committed. These blocks - are stored in Azure in association with a blob, but do not yet - form part of the blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - Datetime to determine the time to retrieve the blocks. - :param str block_list_type: - Specifies whether to return the list of committed blocks, the list - of uncommitted blocks, or both lists together. Valid values are: - committed, uncommitted, or all. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: list committed and/or uncommitted blocks for Block Blob - :rtype: :class:`~azure.storage.blob.models.BlobBlockList` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'blocklist', - 'snapshot': _to_str(snapshot), - 'blocklisttype': _to_str(block_list_type), - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _convert_xml_to_block_list) - - #----Convenience APIs----------------------------------------------------- - - def create_blob_from_path( - self, container_name, blob_name, file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - Creates a new blob from a file path, or updates the content of an - existing blob, with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. 
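# Illustrative usage sketch (added for clarity; not part of the original SDK
# source): staging blocks with put_block, committing them with
# put_block_list, and inspecting the result with get_block_list, per the
# docstrings above. Names are placeholders.
from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService
from azure.multiapi.storage.v2016_05_31.blob.models import BlobBlock

service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

chunks = [b'hello ', b'block ', b'blob']
block_list = []
for index, chunk in enumerate(chunks):
    # Block IDs must all have the same length for a given blob; the SDK
    # base64-encodes the ID before sending it to the service.
    block_id = '{0:08d}'.format(index)
    service.put_block('mycontainer', 'assembled.txt', chunk, block_id)
    block_list.append(BlobBlock(id=block_id))

# The blocks stay uncommitted (and invisible) until they are committed.
service.put_block_list('mycontainer', 'assembled.txt', block_list)

committed = service.get_block_list('mycontainer', 'assembled.txt',
                                   block_list_type='committed')
print([block.id for block in committed.committed_blocks])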
The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - def create_blob_from_stream( - self, container_name, blob_name, stream, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None, use_byte_buffer=False): - ''' - Creates a new blob from a file/stream, or updates the content of - an existing blob, with automatic chunking and progress - notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. 
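# Illustrative usage sketch (added for clarity; not part of the original SDK
# source): uploading a local file with automatic chunking and a progress
# callback, per the create_blob_from_path docstring above. Paths and names
# are placeholders.
from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService
from azure.multiapi.storage.v2016_05_31.blob.models import ContentSettings

service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

def report_progress(current, total):
    # total may be None when the overall size is unknown.
    print('{0} of {1} bytes uploaded'.format(current, total))

service.create_blob_from_path(
    'mycontainer', 'report.pdf', '/tmp/report.pdf',
    content_settings=ContentSettings(content_type='application/pdf'),
    progress_callback=report_progress,
    max_connections=4)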
- :param io.IOBase stream: - Opened file/stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. Note that parallel upload requires the stream to be seekable. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param bool use_byte_buffer: - If True, this will force usage of the original full block buffering upload path. - By default, this value is False and will employ a memory-efficient, - streaming upload algorithm under the following conditions: - The provided stream is seekable, 'require_encryption' is False, and - MAX_BLOCK_SIZE >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD. - One should consider the drawbacks of using this approach. In order to achieve - memory-efficiency, a IOBase stream or file-like object is segmented into logical blocks - using a SubStream wrapper. 
In order to read the correct data, each SubStream must acquire - a lock so that it can safely seek to the right position on the shared, underlying stream. - If max_connections > 1, the concurrency will result in a considerable amount of seeking on - the underlying stream. For the most common inputs such as a file-like stream object, seeking - is an inexpensive operation and this is not much of a concern. However, for other variants of streams - this may not be the case. The trade-off for memory-efficiency must be weighed against the cost of seeking - with your input stream. - The SubStream class will attempt to buffer up to 4 MB internally to reduce the amount of - seek and read calls to the underlying stream. This is particularly beneficial when uploading larger blocks. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - # Adjust count to include padding if we are expected to encrypt. - adjusted_count = count - if (self.key_encryption_key is not None) and (adjusted_count is not None): - adjusted_count += (16 - (count % 16)) - - if adjusted_count and adjusted_count < self.MAX_SINGLE_PUT_SIZE: - if progress_callback: - progress_callback(0, count) - - data = stream.read(count) - self._put_blob( - container_name=container_name, - blob_name=blob_name, - blob=data, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - if progress_callback: - progress_callback(count, count) - else: - cek, iv, encryption_data = None, None, None - - use_original_upload_path = use_byte_buffer or self.require_encryption or \ - self.MAX_BLOCK_SIZE < self.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if self.key_encryption_key: - cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) - - block_ids = _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_BlockBlobChunkUploader, - timeout=timeout, - content_encryption_key=cek, - initialization_vector=iv - ) - else: - block_ids = _upload_blob_substream_blocks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_BlockBlobChunkUploader, - timeout=timeout, - ) - - self._put_block_list( - container_name=container_name, - blob_name=blob_name, - block_list=block_ids, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - encryption_data=encryption_data - ) - - def create_blob_from_bytes( - self, 
container_name, blob_name, blob, index=0, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Creates a new blob from an array of bytes, or updates the content - of an existing blob, with automatic chunking and progress - notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
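For orientation, a minimal usage sketch of these create_blob_from_* helpers; the BlockBlobService class name, import path and credentials below are assumptions not shown in this patch:

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<key>')

    def report(current, total):
        # progress_callback signature func(current, total); total may be None.
        print('uploaded {0} of {1} bytes'.format(current, total))

    # Payloads below MAX_SINGLE_PUT_SIZE go through a single Put Blob call;
    # larger ones are split into MAX_BLOCK_SIZE blocks and uploaded with up to
    # max_connections parallel requests (parallel upload needs a seekable stream;
    # create_blob_from_bytes forces the buffered path via use_byte_buffer=True,
    # as the implementation that follows shows).
    service.create_blob_from_bytes(
        'mycontainer', 'myblob', b'hello world',
        progress_callback=report, max_connections=2, validate_content=True)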
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_not_none('index', index) - _validate_type_bytes('blob', blob) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - progress_callback=progress_callback, - max_connections=max_connections, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - use_byte_buffer=True - ) - - def create_blob_from_text( - self, container_name, blob_name, text, encoding='utf-8', - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Creates a new blob from str/unicode, or updates the content of an - existing blob, with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str text: - Text to upload to the blob. - :param str encoding: - Python encoding to use to convert the text to bytes. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). 
Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('text', text) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - self.create_blob_from_bytes( - container_name=container_name, - blob_name=blob_name, - blob=text, - index=0, - count=len(text), - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - #-----Helper methods------------------------------------ - def _put_blob(self, container_name, blob_name, blob, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Creates a blob or updates an existing blob. - - See create_blob_from_* for high level - functions that handle the creation and upload of large blobs with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as bytes (size < 64MB). For larger size, you - must call put_block and put_block_list to set content of blob. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param metadata: - Name-value pairs associated with the blob as metadata. - :param bool validate_content: - If true, calculates an MD5 hash of the blob content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the new Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - blob = _get_data_bytes_only('blob', blob) - if self.key_encryption_key: - encryption_data, blob = _encrypt_blob(blob, self.key_encryption_key) - request.headers['x-ms-meta-encryptiondata'] = encryption_data - request.body = blob - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_base_properties) - - def _put_block(self, container_name, blob_name, block, block_id, - validate_content=False, lease_id=None, timeout=None): - ''' - See put_block for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. 
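What the chunked upload path above boils down to can also be driven by hand through the public put_block / put_block_list methods; a rough sketch, assuming the client class, import path and 4 MB block size of this package version:

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService
    from azure.multiapi.storage.v2016_05_31.blob.models import BlobBlock

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    block_ids = []
    with open('big.bin', 'rb') as stream:
        index = 0
        while True:
            chunk = stream.read(4 * 1024 * 1024)  # assumed MAX_BLOCK_SIZE here
            if not chunk:
                break
            block_id = '{0:08d}'.format(index)
            # validate_content=True adds a Content-MD5 header per request,
            # roughly base64.b64encode(hashlib.md5(chunk).digest()).
            service.put_block('mycontainer', 'myblob', chunk, block_id,
                              validate_content=True)
            block_ids.append(BlobBlock(id=block_id))
            index += 1
    # Committing the block list is what makes the uploaded blocks visible.
    service.put_block_list('mycontainer', 'myblob', block_ids)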
- ''' - - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block', block) - _validate_not_none('block_id', block_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'block', - 'blockid': _encode_base64(_to_str(block_id)), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id) - } - request.body = _get_data_bytes_or_stream_only('block', block) - if hasattr(request.body, 'read'): - if _len_plus(request.body) is None: - try: - data = b'' - for chunk in iter(lambda: request.body.read(4096), b""): - data += chunk - request.body = data - except AttributeError: - raise ValueError(_ERROR_VALUE_SHOULD_BE_STREAM.format('request.body')) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - self._perform_request(request) - - def _put_block_list( - self, container_name, blob_name, block_list, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None, encryption_data=None): - ''' - See put_block_list for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - :param str encryption_data: - A JSON formatted string containing the encryption metadata generated for this - blob if it was encrypted all at once upon upload. This should only be passed - in by internal methods. - ''' - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block_list', block_list) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'blocklist', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - request.body = _get_request_body( - _convert_block_list_to_xml(block_list)) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - if encryption_data is not None: - request.headers['x-ms-meta-encryptiondata'] = encryption_data - - return self._perform_request(request, _parse_base_properties) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/blob/models.py b/azure/multiapi/storage/v2016_05_31/blob/models.py deleted file mode 100644 index 97f0487..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/models.py +++ /dev/null @@ -1,684 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from .._common_conversion import _to_str -class Container(object): - - ''' - Blob container class. - - :ivar str name: - The name of the container. - :ivar metadata: - A dict containing name-value pairs associated with the container as metadata. - This var is set to None unless the include=metadata param was included - for the list containers operation. If this parameter was specified but the - container has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict mapping str to str - :ivar ContainerProperties properties: - System properties for the container. - ''' - - def __init__(self, name=None, props=None, metadata=None): - self.name = name - self.properties = props or ContainerProperties() - self.metadata = metadata - - -class ContainerProperties(object): - - ''' - Blob container's properties class. - - :ivar datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar LeaseProperties lease: - Stores all the lease information for the container. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.lease = LeaseProperties() - self.public_access = None - - -class Blob(object): - - ''' - Blob class. - - :ivar str name: - Name of blob. - :ivar str snapshot: - A DateTime value that uniquely identifies the snapshot. The value of - this header indicates the snapshot version, and may be used in - subsequent requests to access the snapshot. - :ivar content: - Blob content. - :vartype content: str or bytes - :ivar BlobProperties properties: - Stores all the system properties for the blob. - :ivar metadata: - Name-value pairs associated with the blob as metadata. - ''' - def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None): - self.name = name - self.snapshot = snapshot - self.content = content - self.properties = props or BlobProperties() - self.metadata = metadata - - -class BlobProperties(object): - - ''' - Blob Properties - - :ivar str blob_type: - String indicating this blob's type. - :ivar datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int content_length: - The length of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. 
- :ivar ~azure.storage.blob.models.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.models.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.models.LeaseProperties lease: - Stores all the lease information for the blob. - ''' - - def __init__(self): - self.blob_type = None - self.last_modified = None - self.etag = None - self.content_length = None - self.content_range = None - self.append_blob_committed_block_count = None - self.page_blob_sequence_number = None - self.server_encrypted = None - self.copy = CopyProperties() - self.content_settings = ContentSettings() - self.lease = LeaseProperties() - - -class ContentSettings(object): - - ''' - Used to store the content settings of a blob. - - :ivar str content_type: - The content type specified for the blob. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :ivar str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :ivar str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :ivar str content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. - ''' - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None): - - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.cache_control = cache_control - self.content_md5 = content_md5 - - def _to_headers(self): - return { - 'x-ms-blob-cache-control': _to_str(self.cache_control), - 'x-ms-blob-content-type': _to_str(self.content_type), - 'x-ms-blob-content-disposition': _to_str(self.content_disposition), - 'x-ms-blob-content-md5': _to_str(self.content_md5), - 'x-ms-blob-content-encoding': _to_str(self.content_encoding), - 'x-ms-blob-content-language': _to_str(self.content_language), - } - - -class CopyProperties(object): - ''' - Blob Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. This header does not appear if this blob has never - been the destination in a Copy Blob operation, or if this blob has been - modified after a concluded Copy Blob operation using Set Blob Properties, - Put Blob, or Put Block List. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. This header does not - appear if this blob has never been the destination in a Copy Blob operation, or if - this blob has been modified after a concluded Copy Blob operation using - Set Blob Properties, Put Blob, or Put Block List. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. 
- pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - ''' - - def __init__(self): - self.id = None - self.source = None - self.status = None - self.progress = None - self.completion_time = None - self.status_description = None - - -class LeaseProperties(object): - - ''' - Blob Lease Properties. - - :ivar str status: - The lease status of the blob. - :ivar str state: - Lease state of the blob. - Possible values: pending|success|aborted|failed - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - ''' - - def __init__(self): - self.status = None - self.state = None - self.duration = None - - -class BlobPrefix(object): - ''' - BlobPrefix objects may potentially returned in the blob list when - :func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is - used with a delimiter. Prefixes can be thought of as virtual blob directories. - - :ivar str name: The name of the blob prefix. - ''' - - def __init__(self): - self.name = None - - -class BlobBlockState(object): - '''Block blob block types.''' - - Committed = 'Committed' - '''Committed blocks.''' - - Latest = 'Latest' - '''Latest blocks.''' - - Uncommitted = 'Uncommitted' - '''Uncommitted blocks.''' - - -class BlobBlock(object): - - ''' - BlockBlob Block class. - - :ivar str id: - Block id. - :ivar str state: - Block state. - Possible valuse: committed|uncommitted - :ivar int size: - Block size in bytes. - ''' - - def __init__(self, id=None, state=BlobBlockState.Latest): - self.id = id - self.state = state - - def _set_size(self, size): - self.size = size - - -class BlobBlockList(object): - - ''' - Blob Block List class. - - :ivar committed_blocks: - List of committed blocks. - :vartype committed_blocks: list of :class:`BlobBlock` - :ivar uncommitted_blocks: - List of uncommitted blocks. - :vartype uncommitted_blocks: list of :class:`BlobBlock` - ''' - - def __init__(self): - self.committed_blocks = list() - self.uncommitted_blocks = list() - -class PageRange(object): - - ''' - Page Range for page blob. - - :ivar int start: - Start of page range in bytes. - :ivar int end: - End of page range in bytes. - :ivar bool is_cleared: - Indicates if a page range is cleared or not. Only applicable - for get_page_range_diff API. - ''' - - def __init__(self, start=None, end=None, is_cleared=False): - self.start = start - self.end = end - self.is_cleared = is_cleared - -class ResourceProperties(object): - - ''' - Base response for a resource request. - - :ivar str etag: - Opaque etag value that can be used to check if resource - has been modified. - :ivar datetime last_modified: - Datetime for last time resource was modified. 
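A short illustration of how the model classes above surface in practice; the service class, import path and the get_blob_properties call are assumptions based on this package version, not shown in this patch:

    from azure.multiapi.storage.v2016_05_31.blob import BlockBlobService
    from azure.multiapi.storage.v2016_05_31.blob.models import ContentSettings

    service = BlockBlobService(account_name='myaccount', account_key='<key>')
    service.create_blob_from_text(
        'mycontainer', 'notes.txt', 'hello',
        content_settings=ContentSettings(content_type='text/plain'))

    # get_blob_properties returns a Blob; .properties is the BlobProperties
    # documented here (etag, last_modified, content_settings, lease, copy, ...).
    blob = service.get_blob_properties('mycontainer', 'notes.txt')
    print(blob.properties.etag, blob.properties.content_settings.content_type)

    # The etag feeds the if_match conditional on the upload methods
    # (optimistic concurrency): the write succeeds only if the blob is unchanged.
    service.create_blob_from_text('mycontainer', 'notes.txt', 'updated',
                                  if_match=blob.properties.etag)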
- ''' - - def __init__(self): - self.last_modified = None - self.etag = None - -class AppendBlockProperties(ResourceProperties): - - ''' - Response for an append block request. - - :ivar int append_offset: - Position to start next append. - :ivar int committed_block_count: - Number of committed append blocks. - ''' - - def __init__(self): - super(ResourceProperties, self).__init__() - self.append_offset = None - self.committed_block_count = None - - -class PageBlobProperties(ResourceProperties): - - ''' - Response for a page request. - - :ivar int sequence_number: - Identifer for page blobs to help handle concurrent writes. - ''' - - def __init__(self): - super(ResourceProperties, self).__init__() - self.sequence_number = None - - -class PublicAccess(object): - ''' - Specifies whether data in the container may be accessed publicly and the level of access. - ''' - - OFF = 'off' - ''' - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - ''' - - Blob = 'blob' - ''' - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - ''' - - Container = 'container' - ''' - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - ''' - -class DeleteSnapshot(object): - ''' - Required if the blob has associated snapshots. Specifies how to handle the snapshots. - ''' - - Include = 'include' - ''' - Delete the base blob and all of its snapshots. - ''' - - Only = 'only' - ''' - Delete only the blob's snapshots and not the blob itself. - ''' - -class BlockListType(object): - ''' - Specifies whether to return the list of committed blocks, the list of uncommitted - blocks, or both lists together. - ''' - - All = 'all' - '''Both committed and uncommitted blocks.''' - - Committed = 'committed' - '''Committed blocks.''' - - Uncommitted = 'uncommitted' - '''Uncommitted blocks.''' - - -class SequenceNumberAction(object): - '''Sequence number actions.''' - - Increment = 'increment' - ''' - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - ''' - - Max = 'max' - ''' - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - ''' - - Update = 'update' - '''Sets the sequence number to the value included with the request.''' - - -class _LeaseActions(object): - '''Actions for a lease.''' - - Acquire = 'acquire' - '''Acquire the lease.''' - - Break = 'break' - '''Break the lease.''' - - Change = 'change' - '''Change the lease ID.''' - - Release = 'release' - '''Release the lease.''' - - Renew = 'renew' - '''Renew the lease.''' - -class _BlobTypes(object): - '''Blob type options.''' - - AppendBlob = 'AppendBlob' - '''Append blob type.''' - - BlockBlob = 'BlockBlob' - '''Block blob type.''' - - PageBlob = 'PageBlob' - '''Page blob type.''' - -class Include(object): - - ''' - Specifies the datasets to include in the blob list response. 
- - :ivar ~azure.storage.blob.models.Include Include.COPY: - Specifies that metadata related to any current or previous Copy Blob operation - should be included in the response. - :ivar ~azure.storage.blob.models.Include Include.METADATA: - Specifies that metadata be returned in the response. - :ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS: - Specifies that snapshots should be included in the enumeration. - :ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS: - Specifies that blobs for which blocks have been uploaded, but which have not - been committed using Put Block List, be included in the response. - ''' - - def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False, - copy=False, _str=None): - ''' - :param bool snapshots: - Specifies that snapshots should be included in the enumeration. - :param bool metadata: - Specifies that metadata be returned in the response. - :param bool uncommitted_blobs: - Specifies that blobs for which blocks have been uploaded, but which have - not been committed using Put Block List, be included in the response. - :param bool copy: - Specifies that metadata related to any current or previous Copy Blob - operation should be included in the response. - :param str _str: - A string representing the includes. - ''' - if not _str: - _str = '' - components = _str.split(',') - self.snapshots = snapshots or ('snapshots' in components) - self.metadata = metadata or ('metadata' in components) - self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components) - self.copy = copy or ('copy' in components) - - def __or__(self, other): - return Include(_str=str(self) + str(other)) - - def __add__(self, other): - return Include(_str=str(self) + str(other)) - - def __str__(self): - include = (('snapshots,' if self.snapshots else '') + - ('metadata,' if self.metadata else '') + - ('uncommittedblobs,' if self.uncommitted_blobs else '') + - ('copy,' if self.copy else '')) - return include.rstrip(',') - -Include.COPY = Include(copy=True) -Include.METADATA = Include(metadata=True) -Include.SNAPSHOTS = Include(snapshots=True) -Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True) - - -class BlobPermissions(object): - - ''' - BlobPermissions class to be used with - :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API. - - :ivar BlobPermissions BlobPermissions.ADD: - Add a block to an append blob. - :ivar BlobPermissions BlobPermissions.CREATE: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :ivar BlobPermissions BlobPermissions.DELETE: - Delete the blob. - :ivar BlobPermissions BlobPermissions.READ: - Read the content, properties, metadata and block list. Use the blob as the source of a copy operation. - :ivar BlobPermissions BlobPermissions.WRITE: - Create or write content, properties, metadata, or block list. Snapshot or lease - the blob. Resize the blob (page blob only). Use the blob as the destination of a - copy operation within the same account. - ''' - - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, _str=None): - ''' - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. 
Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param str _str: - A string representing the permissions. - ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.add = add or ('a' in _str) - self.create = create or ('c' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - - def __or__(self, other): - return BlobPermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return BlobPermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - -BlobPermissions.ADD = BlobPermissions(add=True) -BlobPermissions.CREATE = BlobPermissions(create=True) -BlobPermissions.DELETE = BlobPermissions(delete=True) -BlobPermissions.READ = BlobPermissions(read=True) -BlobPermissions.WRITE = BlobPermissions(write=True) - - -class ContainerPermissions(object): - - ''' - ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature` - API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`. - - :ivar ContainerPermissions ContainerPermissions.DELETE: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :ivar ContainerPermissions ContainerPermissions.LIST: - List blobs in the container. - :ivar ContainerPermissions ContainerPermissions.READ: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :ivar ContainerPermissions ContainerPermissions.WRITE: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - ''' - - def __init__(self, read=False, write=False, delete=False, list=False, - _str=None): - ''' - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool list: - List blobs in the container. - :param str _str: - A string representing the permissions. 
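The flag classes above (Include, BlobPermissions, ContainerPermissions) compose with | or + and serialize to the short strings used on the wire, as their __or__ and __str__ implementations show; for example (import path assumed):

    from azure.multiapi.storage.v2016_05_31.blob.models import (
        BlobPermissions, ContainerPermissions, Include)

    print(str(ContainerPermissions.READ | ContainerPermissions.LIST))  # 'rl'
    print(str(BlobPermissions(read=True, write=True)))                 # 'rw'
    print(str(Include(snapshots=True, metadata=True)))                 # 'snapshots,metadata'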
- ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - - def __or__(self, other): - return ContainerPermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return ContainerPermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - -ContainerPermissions.DELETE = ContainerPermissions(delete=True) -ContainerPermissions.LIST = ContainerPermissions(list=True) -ContainerPermissions.READ = ContainerPermissions(read=True) -ContainerPermissions.WRITE = ContainerPermissions(write=True) diff --git a/azure/multiapi/storage/v2016_05_31/blob/pageblobservice.py b/azure/multiapi/storage/v2016_05_31/blob/pageblobservice.py deleted file mode 100644 index 4262775..0000000 --- a/azure/multiapi/storage/v2016_05_31/blob/pageblobservice.py +++ /dev/null @@ -1,1180 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from .._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_required, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, -) -from .._common_conversion import ( - _int_to_str, - _to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from .._serialization import ( - _get_data_bytes_only, - _add_metadata_headers, -) -from .._http import HTTPRequest -from ._error import ( - _ERROR_PAGE_BLOB_SIZE_ALIGNMENT, -) -from ._upload_chunking import ( - _PageBlobChunkUploader, - _upload_blob_chunks, -) -from .models import ( - _BlobTypes, - PageBlobProperties, -) -from .._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ._encryption import _generate_blob_encryption_data -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from ._deserialization import ( - _convert_xml_to_page_ranges, - _parse_page_properties, - _parse_base_properties, -) -from .baseblobservice import BaseBlobService -from os import path -import sys -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - -# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT -_PAGE_ALIGNMENT = 512 - - -class PageBlobService(BaseBlobService): - ''' - Page blobs are a collection of 512-byte pages optimized for random read and - write operations. To create a page blob, you initialize the page blob and - specify the maximum size the page blob will grow. To add or update the - contents of a page blob, you write a page or pages by specifying an offset - and a range that align to 512-byte page boundaries. A write to a page blob - can overwrite just one page, some pages, or up to 4 MB of the page blob. 
- Writes to page blobs happen in-place and are immediately committed to the - blob. The maximum size for a page blob is 1 TB. - - :ivar int MAX_PAGE_SIZE: - The size of the pages put by create_blob_from_* methods. Smaller pages - may be put if there is less data provided. The maximum page size the service - supports is 4MB. - ''' - - MAX_PAGE_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - self.blob_type = _BlobTypes.PageBlob - super(PageBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout) - - def create_blob( - self, container_name, blob_name, content_length, content_settings=None, - sequence_number=None, metadata=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - Creates a new Page Blob. - - See create_blob_from_* for high level functions that handle the - creation and upload of large blobs with automatic chunking and - progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param int content_length: - Required. This header specifies the maximum size - for the page blob, up to 1 TB. The page blob size must be aligned - to a 512-byte boundary. 
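The 512-byte alignment rules referenced throughout this class, in plain arithmetic (a standalone sketch, no service calls):

    PAGE = 512

    def aligned_size(n):
        # Smallest valid page-blob size that can hold n bytes.
        return ((n + PAGE - 1) // PAGE) * PAGE

    def page_range(page_index, page_count=1):
        # Valid write ranges start on a multiple of 512 and end on a
        # multiple of 512 minus 1, e.g. 0-511, 512-1023, ...
        start = page_index * PAGE
        end = start + page_count * PAGE - 1
        return start, end

    assert aligned_size(1000) == 1024
    assert page_range(1) == (512, 1023)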
- :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param int sequence_number: - The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the new Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._create_blob( - container_name, - blob_name, - content_length, - content_settings=content_settings, - sequence_number=sequence_number, - metadata=metadata, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def incremental_copy_blob(self, container_name, blob_name, copy_source, - metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None, - destination_if_match=None, destination_if_none_match=None, destination_lease_id=None, - source_lease_id=None, timeout=None): - ''' - Copies an incremental copy of a blob asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The Blob service copies blobs on a best-effort basis. - - The source blob for an incremental copy operation must be a page blob. - Call get_blob_properties on the destination blob to check the status of the copy operation. - The final blob will be committed when the copy completes. - - :param str container_name: - Name of the destination container. The container must exist. - :param str blob_name: - Name of the destination blob. If the destination blob exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure page blob. 
- The value should be URL-encoded as it would appear in a request URI. - The copy source must be a snapshot and include a valid SAS token or be public. - Example: - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=&sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: A dict mapping str to str. - :param datetime destination_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :param datetime destination_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the destination blob - has not been modified since the specified ate/time. If the destination blob - has been modified, the Blob service returns status code 412 (Precondition Failed). - :param ETag destination_if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - matches the ETag value for an existing destination blob. If the ETag for - the destination blob does not match the ETag specified for If-Match, the - Blob service returns status code 412 (Precondition Failed). - :param ETag destination_if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - does not match the ETag value for the destination blob. Specify the wildcard - character (*) to perform the operation only if the destination blob does not - exist. If the specified condition isn't met, the Blob service returns status - code 412 (Precondition Failed). - :param str destination_lease_id: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :param str source_lease_id: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Copy operation properties such as status, source, and ID. 
- :rtype: :class:`~azure.storage.blob.models.CopyProperties` - ''' - return self._copy_blob(container_name, blob_name, copy_source, - metadata, - source_if_modified_since=None, source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=destination_if_modified_since, - destination_if_unmodified_since=destination_if_unmodified_since, - destination_if_match=destination_if_match, - destination_if_none_match=destination_if_none_match, - destination_lease_id=destination_lease_id, - source_lease_id=source_lease_id, timeout=timeout, - incremental_copy=True) - - def update_page( - self, container_name, blob_name, page, start_range, end_range, - validate_content=False, lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Updates a range of pages. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param bytes page: - Content of the page. - :param int start_range: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param int end_range: - End of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :param int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :param int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value matches the - value specified. 
If the values do not match, the Blob service fails. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value does not - match the value specified. If the values are identical, the Blob - service fails. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._update_page( - container_name, - blob_name, - page, - start_range, - end_range, - validate_content=validate_content, - lease_id=lease_id, - if_sequence_number_lte=if_sequence_number_lte, - if_sequence_number_lt=if_sequence_number_lt, - if_sequence_number_eq=if_sequence_number_eq, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def clear_page( - self, container_name, blob_name, start_range, end_range, - lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Clears a range of pages. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int start_range: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param int end_range: - End of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param str lease_id: - Required if the blob has an active lease. - :param int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :param int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :param int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value matches the - value specified. 
If the values do not match, the Blob service fails. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value does not - match the value specified. If the values are identical, the Blob - service fails. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'page', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-page-write': 'clear', - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), - 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), - 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - align_to_page=True) - - return self._perform_request(request, _parse_page_properties) - - def get_page_ranges( - self, container_name, blob_name, snapshot=None, start_range=None, - end_range=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve information - from. - :param int start_range: - Start of byte range to use for getting valid page ranges. - If no end_range is given, all bytes after the start_range will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param int end_range: - End of byte range to use for getting valid page ranges. - If end_range is given, start_range must be provided. - This range will return valid page ranges for from the offset start up to - offset end. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A list of valid Page Ranges for the Page Blob. - :rtype: list of :class:`~azure.storage.blob.models.PageRange` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'pagelist', - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - if start_range is not None: - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - align_to_page=True) - - return self._perform_request(request, _convert_xml_to_page_ranges) - - def get_page_ranges_diff( - self, container_name, blob_name, previous_snapshot, snapshot=None, - start_range=None, end_range=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - The response will include only the pages that are different between either a - recent snapshot or the current blob and a previous snapshot, including pages - that were cleared. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str previous_snapshot: - The snapshot parameter is an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that - specifies a more recent blob snapshot to be compared - against a previous snapshot (previous_snapshot). - :param int start_range: - Start of byte range to use for getting different page ranges. - If no end_range is given, all bytes after the start_range will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param int end_range: - End of byte range to use for getting different page ranges. - If end_range is given, start_range must be provided. - This range will return valid page ranges for from the offset start up to - offset end. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. 
- :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A list of different Page Ranges for the Page Blob. - :rtype: list of :class:`~azure.storage.blob.models.PageRange` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('previous_snapshot', previous_snapshot) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'pagelist', - 'snapshot': _to_str(snapshot), - 'prevsnapshot': _to_str(previous_snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - if start_range is not None: - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - align_to_page=True) - - return self._perform_request(request, _convert_xml_to_page_ranges) - - def set_sequence_number( - self, container_name, blob_name, sequence_number_action, sequence_number=None, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - - ''' - Sets the blob sequence number. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('sequence_number_action', sequence_number_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-sequence-number': _to_str(sequence_number), - 'x-ms-sequence-number-action': _to_str(sequence_number_action), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_page_properties) - - def resize_blob( - self, container_name, blob_name, content_length, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - - ''' - Resizes a page blob to the specified size. If the specified value is less - than the current size of the blob, then all pages above the specified value - are cleared. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int content_length: - Size to resize blob to. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-content-length': _to_str(content_length), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_page_properties) - - #----Convenience APIs----------------------------------------------------- - - def create_blob_from_path( - self, container_name, blob_name, file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, max_connections=2, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Creates a new blob from a file path, or updates the content of an - existing blob, with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - progress_callback=progress_callback, - max_connections=max_connections, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - - def create_blob_from_stream( - self, container_name, blob_name, stream, count, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - Creates a new blob from a file/stream, or updates the content of an - existing blob, with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param io.IOBase stream: - Opened file/stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is required, a page - blob cannot be created if the count is unknown. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set the blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. 
- :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. Note that parallel upload - requires the stream to be seekable. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - _validate_not_none('count', count) - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - if count < 0: - raise ValueError(_ERROR_VALUE_NEGATIVE.format('count')) - - if count % _PAGE_ALIGNMENT != 0: - raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count)) - - cek, iv, encryption_data = None, None, None - if self.key_encryption_key is not None: - cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) - - response = self._create_blob( - container_name=container_name, - blob_name=blob_name, - content_length=count, - content_settings=content_settings, - metadata=metadata, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - encryption_data=encryption_data - ) - - _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_PAGE_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_PageBlobChunkUploader, - if_match=response.etag, - timeout=timeout, - content_encryption_key=cek, - initialization_vector=iv - ) - - def create_blob_from_bytes( - self, container_name, blob_name, blob, index=0, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Creates a new blob from an array of bytes, or updates the content - of an existing blob, with automatic chunking and progress - notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the byte array. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_type_bytes('blob', blob) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - #-----Helper methods----------------------------------------------------- - - def _create_blob( - self, container_name, blob_name, content_length, content_settings=None, - sequence_number=None, metadata=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, - encryption_data=None): - ''' - See create_blob for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - :param str _encryption_data: - The JSON formatted encryption metadata to upload as a part of the blob. - This should only be passed internally from other methods and only applied - when uploading entire blob contents immediately follows creation of the blob. 
- ''' - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-blob-content-length': _to_str(content_length), - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-blob-sequence-number': _to_str(sequence_number), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - if encryption_data is not None: - request.headers['x-ms-meta-encryptiondata'] = encryption_data - - return self._perform_request(request, _parse_base_properties) - - def _update_page( - self, container_name, blob_name, page, start_range, end_range, - validate_content=False, lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - See update_page for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - ''' - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'page', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-page-write': 'update', - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), - 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), - 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - align_to_page=True) - request.body = _get_data_bytes_only('page', page) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_page_properties) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/cloudstorageaccount.py b/azure/multiapi/storage/v2016_05_31/cloudstorageaccount.py deleted file mode 100644 index e1dcee1..0000000 --- a/azure/multiapi/storage/v2016_05_31/cloudstorageaccount.py +++ /dev/null @@ -1,189 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -# Note that we import BlobService/QueueService/TableService on demand -# because this module is imported by azure/storage/__init__ -# ie. we don't want 'import azure.storage' to trigger an automatic import -# of blob/queue/table packages. - -from .sharedaccesssignature import ( - SharedAccessSignature, -) -from .models import ( - ResourceTypes, - Services, - AccountPermissions, -) -from ._error import _validate_not_none - -class CloudStorageAccount(object): - """ - Provides a factory for creating the blob, queue, table, and file services - with a common account name and account key or sas token. Users can either - use the factory or can construct the appropriate service directly. - """ - - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless is_emulated is used. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters. - ''' - self.account_name = account_name - self.account_key = account_key - self.sas_token = sas_token - self.is_emulated = is_emulated - - def create_block_blob_service(self): - ''' - Creates a BlockBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService` - ''' - from .blob.blockblobservice import BlockBlobService - return BlockBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - - def create_page_blob_service(self): - ''' - Creates a PageBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService` - ''' - from .blob.pageblobservice import PageBlobService - return PageBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - - def create_append_blob_service(self): - ''' - Creates a AppendBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService` - ''' - from .blob.appendblobservice import AppendBlobService - return AppendBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - - def create_table_service(self): - ''' - Creates a TableService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. 
- :rtype: :class:`~azure.storage.table.tableservice.TableService` - ''' - from .table.tableservice import TableService - return TableService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - - def create_queue_service(self): - ''' - Creates a QueueService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.queue.queueservice.QueueService` - ''' - from .queue.queueservice import QueueService - return QueueService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - - def create_file_service(self): - ''' - Creates a FileService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.file.fileservice.FileService` - ''' - from .file.fileservice import FileService - return FileService(self.account_name, self.account_key, - sas_token=self.sas_token) - - def generate_shared_access_signature(self, services, resource_types, - permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param Services services: - Specifies the services accessible with the account SAS. You can - combine values to provide access to more than one service. - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. 
- ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(services, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/common/__init__.py b/azure/multiapi/storage/v2016_05_31/common/__init__.py deleted file mode 100644 index addc24d..0000000 --- a/azure/multiapi/storage/v2016_05_31/common/__init__.py +++ /dev/null @@ -1,48 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -from .._constants import ( - __author__, - __version__, - X_MS_VERSION, -) - -from ..models import ( - RetentionPolicy, - Logging, - Metrics, - CorsRule, - ServiceProperties, - AccessPolicy, - ResourceTypes, - Services, - AccountPermissions, - Protocol, - ServiceStats, - GeoReplication, - LocationMode, - RetryContext, -) - -from ..retry import ( - ExponentialRetry, - LinearRetry, - no_retry, -) - -from ..cloudstorageaccount import CloudStorageAccount -from ..sharedaccesssignature import ( - SharedAccessSignature, -) diff --git a/azure/multiapi/storage/v2016_05_31/common/_error.py b/azure/multiapi/storage/v2016_05_31/common/_error.py deleted file mode 100644 index b448d2f..0000000 --- a/azure/multiapi/storage/v2016_05_31/common/_error.py +++ /dev/null @@ -1 +0,0 @@ -from .._error import * \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/common/models.py b/azure/multiapi/storage/v2016_05_31/common/models.py deleted file mode 100644 index c66adb2..0000000 --- a/azure/multiapi/storage/v2016_05_31/common/models.py +++ /dev/null @@ -1 +0,0 @@ -from ..models import * \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/file/__init__.py b/azure/multiapi/storage/v2016_05_31/file/__init__.py deleted file mode 100644 index caed4b5..0000000 --- a/azure/multiapi/storage/v2016_05_31/file/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#-------------------------------------------------------------------------- -from .models import ( - Share, - ShareProperties, - File, - FileProperties, - Directory, - DirectoryProperties, - FileRange, - ContentSettings, - CopyProperties, - SharePermissions, - FilePermissions, -) - -from .fileservice import FileService diff --git a/azure/multiapi/storage/v2016_05_31/file/_deserialization.py b/azure/multiapi/storage/v2016_05_31/file/_deserialization.py deleted file mode 100644 index cd8f0ae..0000000 --- a/azure/multiapi/storage/v2016_05_31/file/_deserialization.py +++ /dev/null @@ -1,231 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from dateutil import parser -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from .models import ( - Share, - Directory, - File, - FileProperties, - FileRange, - ShareProperties, - DirectoryProperties, -) -from ..models import ( - _list, -) -from .._deserialization import ( - _parse_properties, - _parse_metadata, -) -from .._error import _validate_content_match -from .._common_conversion import ( - _get_content_md5, - _to_str, -) - -def _parse_share(response, name): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, ShareProperties) - return Share(name, props, metadata) - -def _parse_directory(response, name): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, DirectoryProperties) - return Directory(name, props, metadata) - -def _parse_file(response, name, validate_content=False): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, FileProperties) - - # For range gets, only look at 'x-ms-content-md5' for overall MD5 - content_settings = getattr(props, 'content_settings') - if 'content-range' in response.headers: - if 'x-ms-content-md5' in response.headers: - setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-content-md5'])) - else: - delattr(content_settings, 'content_md5') - - if validate_content: - computed_md5 = _get_content_md5(response.body) - _validate_content_match(response.headers['content-md5'], computed_md5) - - return File(name, response.body, props, metadata) - -def _convert_xml_to_shares(response): - ''' - - - string-value - string-value - int-value - - - share-name - - date/time-value - etag - max-share-size - - - value - - - - marker-value - - ''' - if response is None or response.body is None: - return None - - shares = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(shares, 'next_marker', next_marker) - - shares_element = list_element.find('Shares') - - for 
share_element in shares_element.findall('Share'): - # Name element - share = Share() - share.name = share_element.findtext('Name') - - # Metadata - metadata_root_element = share_element.find('Metadata') - if metadata_root_element is not None: - share.metadata = dict() - for metadata_element in metadata_root_element: - share.metadata[metadata_element.tag] = metadata_element.text - - # Properties - properties_element = share_element.find('Properties') - share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified')) - share.properties.etag = properties_element.findtext('Etag') - share.properties.quota = int(properties_element.findtext('Quota')) - - # Add share to list - shares.append(share) - - return shares - -def _convert_xml_to_directories_and_files(response): - ''' - - - string-value - int-value - - - file-name - - size-in-bytes - - - - directory-name - - - - - ''' - if response is None or response.body is None: - return None - - entries = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(entries, 'next_marker', next_marker) - - entries_element = list_element.find('Entries') - - for file_element in entries_element.findall('File'): - # Name element - file = File() - file.name = file_element.findtext('Name') - - # Properties - properties_element = file_element.find('Properties') - file.properties.content_length = int(properties_element.findtext('Content-Length')) - - # Add file to list - entries.append(file) - - for directory_element in entries_element.findall('Directory'): - # Name element - directory = Directory() - directory.name = directory_element.findtext('Name') - - # Add directory to list - entries.append(directory) - - return entries - -def _convert_xml_to_ranges(response): - ''' - - - - Start Byte - End Byte - - - Start Byte - End Byte - - - ''' - if response is None or response.body is None: - return None - - ranges = list() - ranges_element = ETree.fromstring(response.body) - - for range_element in ranges_element.findall('Range'): - # Parse range - range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End'))) - - # Add range to list - ranges.append(range) - - return ranges - -def _convert_xml_to_share_stats(response): - ''' - - - 15 - - ''' - if response is None or response.body is None: - return None - - share_stats_element = ETree.fromstring(response.body) - return int(share_stats_element.findtext('ShareUsage')) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/file/_download_chunking.py b/azure/multiapi/storage/v2016_05_31/file/_download_chunking.py deleted file mode 100644 index 66ed15a..0000000 --- a/azure/multiapi/storage/v2016_05_31/file/_download_chunking.py +++ /dev/null @@ -1,112 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#-------------------------------------------------------------------------- -import threading - -from time import sleep -from .._error import _ERROR_NO_SINGLE_THREAD_CHUNKING - -def _download_file_chunks(file_service, share_name, directory_name, file_name, - download_size, block_size, progress, start_range, end_range, - stream, max_connections, progress_callback, validate_content, - timeout, operation_context): - if max_connections <= 1: - raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('file')) - - downloader = _FileChunkDownloader( - file_service, - share_name, - directory_name, - file_name, - download_size, - block_size, - progress, - start_range, - end_range, - stream, - progress_callback, - validate_content, - timeout, - operation_context, - ) - - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - result = list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets())) - -class _FileChunkDownloader(object): - def __init__(self, file_service, share_name, directory_name, file_name, - download_size, chunk_size, progress, start_range, end_range, - stream, progress_callback, validate_content, timeout, operation_context): - self.file_service = file_service - self.share_name = share_name - self.directory_name = directory_name - self.file_name = file_name - self.chunk_size = chunk_size - - self.download_size = download_size - self.start_index = start_range - self.file_end = end_range - - self.stream = stream - self.stream_start = stream.tell() - self.stream_lock = threading.Lock() - self.progress_callback = progress_callback - self.progress_total = progress - self.progress_lock = threading.Lock() - self.validate_content = validate_content - self.timeout = timeout - self.operation_context = operation_context - - def get_chunk_offsets(self): - index = self.start_index - while index < self.file_end: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - if chunk_start + self.chunk_size > self.file_end: - chunk_end = self.file_end - else: - chunk_end = chunk_start + self.chunk_size - - chunk_data = self._download_chunk(chunk_start, chunk_end).content - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def _update_progress(self, length): - if self.progress_callback is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.download_size) - - def _write_to_stream(self, chunk_data, chunk_start): - with self.stream_lock: - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - return self.file_service._get_file( - self.share_name, - self.directory_name, - self.file_name, - start_range=chunk_start, - end_range=chunk_end - 1, - validate_content=self.validate_content, - timeout=self.timeout, - _context=self.operation_context - ) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/file/_serialization.py b/azure/multiapi/storage/v2016_05_31/file/_serialization.py deleted file mode 100644 index d7928f0..0000000 --- a/azure/multiapi/storage/v2016_05_31/file/_serialization.py +++ /dev/null @@ -1,74 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from time import time -from wsgiref.handlers import format_date_time -from .._error import ( - _validate_not_none, - _ERROR_START_END_NEEDED_FOR_MD5, - _ERROR_RANGE_TOO_LARGE_FOR_MD5, -) -from .._common_conversion import _str - -def _get_path(share_name=None, directory_name=None, file_name=None): - ''' - Creates the path to access a file resource. - - share_name: - Name of share. - directory_name: - The path to the directory. - file_name: - Name of file. - ''' - if share_name and directory_name and file_name: - return '/{0}/{1}/{2}'.format( - _str(share_name), - _str(directory_name), - _str(file_name)) - elif share_name and directory_name: - return '/{0}/{1}'.format( - _str(share_name), - _str(directory_name)) - elif share_name and file_name: - return '/{0}/{1}'.format( - _str(share_name), - _str(file_name)) - elif share_name: - return '/{0}'.format(_str(share_name)) - else: - return '/' - -def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, end_range_required=True, check_content_md5=False): - # If end range is provided, start range must be provided - if start_range_required == True or end_range is not None: - _validate_not_none('start_range', start_range) - if end_range_required == True: - _validate_not_none('end_range', end_range) - - # Format based on whether end_range is present - request.headers = request.headers or {} - if end_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - if check_content_md5 == True: - if start_range is None or end_range is None: - raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5) - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5) - - request.headers['x-ms-range-get-content-md5'] = 'true' \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/file/_upload_chunking.py b/azure/multiapi/storage/v2016_05_31/file/_upload_chunking.py deleted file mode 100644 index f3428cb..0000000 --- a/azure/multiapi/storage/v2016_05_31/file/_upload_chunking.py +++ /dev/null @@ -1,142 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
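
The removed _validate_and_format_range_headers boils down to emitting an inclusive byte range in the x-ms-range header. A stripped-down illustration of the header it produced::

    def format_range_header(start_range, end_range=None):
        # Mirrors the removed helper: the service takes an inclusive byte range.
        if end_range is not None:
            return {'x-ms-range': 'bytes={0}-{1}'.format(start_range, end_range)}
        return {'x-ms-range': 'bytes={0}-'.format(start_range)}

    print(format_range_header(0, 511))  # {'x-ms-range': 'bytes=0-511'}
    print(format_range_header(1024))    # {'x-ms-range': 'bytes=1024-'}
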
-# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -import threading - -from time import sleep - -def _upload_file_chunks(file_service, share_name, directory_name, file_name, - file_size, block_size, stream, max_connections, - progress_callback, validate_content, timeout): - uploader = _FileChunkUploader( - file_service, - share_name, - directory_name, - file_name, - file_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - timeout - ) - - if progress_callback is not None: - progress_callback(0, file_size) - - if max_connections > 1: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets())) - else: - if file_size is not None: - range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()] - else: - range_ids = uploader.process_all_unknown_size() - - return range_ids - -class _FileChunkUploader(object): - def __init__(self, file_service, share_name, directory_name, file_name, - file_size, chunk_size, stream, parallel, progress_callback, - validate_content, timeout): - self.file_service = file_service - self.share_name = share_name - self.directory_name = directory_name - self.file_name = file_name - self.file_size = file_size - self.chunk_size = chunk_size - self.stream = stream - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - self.progress_callback = progress_callback - self.progress_total = 0 - self.progress_lock = threading.Lock() if parallel else None - self.validate_content = validate_content - self.timeout = timeout - - def get_chunk_offsets(self): - index = 0 - if self.file_size is None: - # we don't know the size of the stream, so we have no - # choice but to seek - while True: - data = self._read_from_stream(index, 1) - if not data: - break - yield index - index += self.chunk_size - else: - while index < self.file_size: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_offset): - size = self.chunk_size - if self.file_size is not None: - size = min(size, self.file_size - chunk_offset) - chunk_data = self._read_from_stream(chunk_offset, size) - return self._upload_chunk_with_progress(chunk_offset, chunk_data) - - def process_all_unknown_size(self): - assert self.stream_lock is None - range_ids = [] - index = 0 - while True: - data = self._read_from_stream(None, self.chunk_size) - if data: - index += len(data) - range_id = self._upload_chunk_with_progress(index, data) - range_ids.append(range_id) - else: - break - - return range_ids - - def _read_from_stream(self, offset, count): - if self.stream_lock is not None: - with self.stream_lock: - self.stream.seek(self.stream_start + offset) - data = self.stream.read(count) - else: - data = self.stream.read(count) - return data - - def _update_progress(self, length): - if self.progress_callback is not None: - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - else: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.file_size) - - def _upload_chunk_with_progress(self, chunk_start, chunk_data): - chunk_end = chunk_start + len(chunk_data) - 1 - self.file_service.update_range( - self.share_name, - 
self.directory_name, - self.file_name, - chunk_data, - chunk_start, - chunk_end, - self.validate_content, - timeout=self.timeout - ) - range_id = 'bytes={0}-{1}'.format(chunk_start, chunk_end) - self._update_progress(len(chunk_data)) - return range_id \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/file/fileservice.py b/azure/multiapi/storage/v2016_05_31/file/fileservice.py deleted file mode 100644 index cab16a5..0000000 --- a/azure/multiapi/storage/v2016_05_31/file/fileservice.py +++ /dev/null @@ -1,2373 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from azure.common import AzureHttpError -from .._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _validate_type_bytes, - _ERROR_VALUE_NEGATIVE, - _ERROR_STORAGE_MISSING_INFO, - _ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES, - _ERROR_PARALLEL_NOT_SEEKABLE, - _validate_access_policies, -) -from .._common_conversion import ( - _int_to_str, - _to_str, - _get_content_md5, -) -from .._serialization import ( - _get_request_body, - _get_data_bytes_only, - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, - _add_metadata_headers, -) -from .._deserialization import ( - _convert_xml_to_service_properties, - _convert_xml_to_signed_identifiers, - _get_download_size, - _parse_metadata, - _parse_properties, - _parse_length_from_content_range, -) -from ..models import ( - Services, - ListGenerator, - _OperationContext, -) -from .models import ( - File, - FileProperties, -) -from .._http import HTTPRequest -from ._upload_chunking import _upload_file_chunks -from ._download_chunking import _download_file_chunks -from .._auth import ( - _StorageSharedKeyAuthentication, - _StorageSASAuthentication, -) -from .._connection import _ServiceParameters -from .._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, - DEV_ACCOUNT_NAME, -) -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from ._deserialization import ( - _convert_xml_to_shares, - _convert_xml_to_directories_and_files, - _convert_xml_to_ranges, - _convert_xml_to_share_stats, - _parse_file, - _parse_share, - _parse_directory, -) -from ..sharedaccesssignature import ( - SharedAccessSignature, -) -from ..storageclient import StorageClient -from os import path -import sys -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - -class FileService(StorageClient): - - ''' - The Server Message Block (SMB) protocol is the preferred file share protocol - used on premise today. The Microsoft Azure File service enables customers to - leverage the availability and scalability of Azure's Cloud Infrastructure as - a Service (IaaS) SMB without having to rewrite SMB client applications. 
- - The Azure File service also offers a compelling alternative to traditional - Direct Attached Storage (DAS) and Storage Area Network (SAN) solutions, which - are often complex and expensive to install, configure, and operate. - - :ivar int MAX_SINGLE_GET_SIZE: - The size of the first range get performed by get_file_to_* methods if - max_connections is greater than 1. Less data will be returned if the - file is smaller than this. - :ivar int MAX_CHUNK_GET_SIZE: - The size of subsequent range gets performed by get_file_to_* methods if - max_connections is greater than 1 and the file is larger than MAX_SINGLE_GET_SIZE. - Less data will be returned if the remainder of the file is smaller than - this. If this is set to larger than 4MB, content_validation will throw an - error if enabled. However, if content_validation is not desired a size - greater than 4MB may be optimal. Setting this below 4MB is not recommended. - :ivar int MAX_RANGE_SIZE: - The size of the ranges put by create_file_from_* methods. Smaller ranges - may be put if there is less data provided. The maximum range size the service - supports is 4MB. - ''' - MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024 - MAX_CHUNK_GET_SIZE = 8 * 1024 * 1024 - MAX_RANGE_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. 
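
The constructor parameters documented above were typically combined as below. A minimal sketch, assuming the (now removed) v2016_05_31 package is installed and that its file subpackage re-exports FileService as the upstream azure-storage SDK of that era did; the account name, key, and SAS token are placeholders::

    from azure.multiapi.storage.v2016_05_31.file import FileService

    # Shared key authentication; requests are signed with the account key.
    service = FileService(account_name='myaccount', account_key='<base64-account-key>')

    # Alternatively, authenticate with a SAS token instead of the key.
    sas_service = FileService(account_name='myaccount', sas_token='<sas-token>')

    url = service.make_file_url('myshare', 'mydir', 'myfile.txt')
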
- ''' - service_params = _ServiceParameters.get_service_parameters( - 'file', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(FileService, self).__init__(service_params) - - if self.account_name == DEV_ACCOUNT_NAME: - raise ValueError(_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - def make_file_url(self, share_name, directory_name, file_name, - protocol=None, sas_token=None): - ''' - Creates the url to access a file. - - :param str share_name: - Name of share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file. - :param str protocol: - Protocol to use: 'http' or 'https'. If not specified, uses the - protocol specified when FileService was initialized. - :param str sas_token: - Shared access signature token created with - generate_shared_access_signature. - :return: file access URL. - :rtype: str - ''' - - if directory_name is None: - url = '{}://{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - share_name, - file_name, - ) - else: - url = '{}://{}/{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - share_name, - directory_name, - file_name, - ) - - if sas_token: - url += '?' + sas_token - - return url - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the file service. - Use the returned signature with the sas_token parameter of the FileService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. 
- For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.FILE, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_share_shared_access_signature(self, share_name, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, - protocol=None, - cache_control=None, - content_disposition=None, - content_encoding=None, - content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param SharePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_share_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. 
- :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_share( - share_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def generate_file_shared_access_signature(self, share_name, - directory_name=None, - file_name=None, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, - protocol=None, - cache_control=None, - content_disposition=None, - content_encoding=None, - content_language=None, - content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param FilePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. 
Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_file( - share_name, - directory_name, - file_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def set_file_service_properties(self, hour_metrics=None, minute_metrics=None, - cors=None, timeout=None): - ''' - Sets the properties of a storage account's File service, including - Azure Storage Analytics. If an element (ex HourMetrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param Metrics hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :param Metrics minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list of :class:`~azure.storage.models.CorsRule` - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(None, hour_metrics, minute_metrics, cors)) - - self._perform_request(request) - - def get_file_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's File service, including - Azure Storage Analytics. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The file service properties. 
- :rtype: - :class:`~azure.storage.models.ServiceProperties` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def list_shares(self, prefix=None, marker=None, num_results=None, - include_metadata=False, timeout=None): - ''' - Returns a generator to list the shares under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned or num_results - is reached. - - If num_results is specified and the account has more than that number of - shares, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only shares whose names - begin with the specified prefix. - :param int num_results: - Specifies the maximum number of shares to return. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - include = 'metadata' if include_metadata else None - operation_context = _OperationContext(location_lock=True) - kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, - 'include': include, 'timeout': timeout, '_context': operation_context} - resp = self._list_shares(**kwargs) - - return ListGenerator(resp, self._list_shares, (), kwargs) - - def _list_shares(self, prefix=None, marker=None, max_results=None, - include=None, timeout=None, _context=None): - ''' - Returns a list of the shares under the specified account. - - :param str prefix: - Filters the results to return only shares whose names - begin with the specified prefix. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of shares to return. A single list - request may return up to 1000 shares and potentially a continuation - token which should be followed to get additional resutls. - :param string include: - Include this parameter to specify that the share's - metadata be returned as part of the response body. set this - parameter to string 'metadata' to get share's metadata. - :param int timeout: - The timeout parameter is expressed in seconds. 
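
A hedged usage sketch for the list_shares generator described above, reusing the placeholder credentials from the earlier constructor example; share names and the prefix are illustrative::

    from azure.multiapi.storage.v2016_05_31.file import FileService

    service = FileService(account_name='myaccount', account_key='<base64-account-key>')

    # The generator lazily follows continuation tokens returned by the service.
    for share in service.list_shares(prefix='logs', include_metadata=True):
        print(share.name, share.properties.quota, share.metadata)

    # With num_results, the next-page marker is exposed once iteration finishes.
    page = service.list_shares(num_results=100)
    shares = list(page)
    marker = page.next_marker
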
- ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_shares, operation_context=_context) - - def create_share(self, share_name, metadata=None, quota=None, - fail_on_exist=False, timeout=None): - ''' - Creates a new share under the specified account. If the share - with the same name already exists, the operation fails on the - service. By default, the exception is swallowed by the client. - To expose the exception, specify True for fail_on_exists. - - :param str share_name: - Name of share to create. - :param metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :type metadata: a dict of str to str: - :param int quota: - Specifies the maximum size of the share, in gigabytes. Must be - greater than 0, and less than or equal to 5TB (5120). - :param bool fail_on_exist: - Specify whether to throw an exception when the share exists. - False by default. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if share is created, False if share already exists. - :rtype: bool - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-share-quota': _int_to_str(quota) - } - _add_metadata_headers(metadata, request) - - if not fail_on_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_share_properties(self, share_name, timeout=None): - ''' - Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A Share that exposes properties and metadata. - :rtype: :class:`.Share` - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_share, [share_name]) - - def set_share_properties(self, share_name, quota, timeout=None): - ''' - Sets service-defined properties for the specified share. - - :param str share_name: - Name of existing share. - :param int quota: - Specifies the maximum size of the share, in gigabytes. Must be - greater than 0, and less than or equal to 5 TB (5120 GB). - :param int timeout: - The timeout parameter is expressed in seconds. 
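
A short sketch of share creation against the create_share and get_share_properties methods shown above; names, quota, and metadata are placeholders::

    from azure.multiapi.storage.v2016_05_31.file import FileService

    service = FileService(account_name='myaccount', account_key='<base64-account-key>')

    # Quota is expressed in GB (1-5120); metadata is a plain dict of strings.
    service.create_share('myshare', metadata={'Category': 'test'}, quota=10)

    share = service.get_share_properties('myshare')
    print(share.properties.quota, share.properties.last_modified)
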
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('quota', quota) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-share-quota': _int_to_str(quota) - } - - self._perform_request(request) - - def get_share_metadata(self, share_name, timeout=None): - ''' - Returns all user-defined metadata for the specified share. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the share metadata name, value pairs. - :rtype: a dict mapping str to str - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_metadata) - - def set_share_metadata(self, share_name, metadata=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - share. Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param str share_name: - Name of existing share. - :param metadata: - A dict containing name-value pairs to associate with the share as - metadata. Example: {'category':'test'} - :type metadata: a dict mapping str to str - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def get_share_acl(self, share_name, timeout=None): - ''' - Gets the permissions for the specified share. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A dictionary of access policies associated with the share. - :rtype: dict of str to :class:`.AccessPolicy` - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_signed_identifiers) - - def set_share_acl(self, share_name, signed_identifiers=None, timeout=None): - ''' - Sets the permissions for the specified share or stored access - policies that may be used with Shared Access Signatures. - - :param str share_name: - Name of existing share. - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict of str to :class:`.AccessPolicy` - :param int timeout: - The timeout parameter is expressed in seconds. 
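
A sketch of a stored access policy plus a share SAS that references it, per the set_share_acl and generate_share_shared_access_signature docstrings above. The import locations for AccessPolicy and SharePermissions are assumptions based on this package's layout (they mirror the azure-storage SDK of the same vintage); all names are placeholders::

    from datetime import datetime, timedelta

    from azure.multiapi.storage.v2016_05_31.file import FileService
    from azure.multiapi.storage.v2016_05_31.models import AccessPolicy
    from azure.multiapi.storage.v2016_05_31.file.models import SharePermissions

    service = FileService(account_name='myaccount', account_key='<base64-account-key>')

    # One stored access policy, keyed by an id of up to 64 characters.
    policy = AccessPolicy(permission=SharePermissions(read=True, list=True),
                          expiry=datetime.utcnow() + timedelta(days=1))
    service.set_share_acl('myshare', {'read-policy': policy})

    # A share SAS that references the stored policy instead of embedding permissions.
    token = service.generate_share_shared_access_signature('myshare', id='read-policy')
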
- ''' - _validate_not_none('share_name', share_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - - self._perform_request(request) - - def get_share_stats(self, share_name, timeout=None): - ''' - Gets the approximate size of the data stored on the share, - rounded up to the nearest gigabyte. - - Note that this value may not include all recently created - or recently resized files. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the approximate size of the data stored on the share. - :rtype: int - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_share_stats) - - def delete_share(self, share_name, fail_not_exist=False, timeout=None): - ''' - Marks the specified share for deletion. If the share - does not exist, the operation fails on the service. By - default, the exception is swallowed by the client. - To expose the exception, specify True for fail_not_exist. - - :param str share_name: - Name of share to delete. - :param bool fail_not_exist: - Specify whether to throw an exception when the share doesn't - exist. False by default. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if share is deleted, False share doesn't exist. - :rtype: bool - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - } - - if not fail_not_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def create_directory(self, share_name, directory_name, metadata=None, - fail_on_exist=False, timeout=None): - ''' - Creates a new directory under the specified share or parent directory. - If the directory with the same name already exists, the operation fails - on the service. By default, the exception is swallowed by the client. - To expose the exception, specify True for fail_on_exists. - - :param str share_name: - Name of existing share. - :param str directory_name: - Name of directory to create, including the path to the parent - directory. - :param metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :type metadata: dict of str to str: - :param bool fail_on_exist: - specify whether to throw an exception when the directory exists. - False by default. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if directory is created, False if directory already exists. 
- :rtype: bool - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - if not fail_on_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def delete_directory(self, share_name, directory_name, - fail_not_exist=False, timeout=None): - ''' - Deletes the specified empty directory. Note that the directory must - be empty before it can be deleted. Attempting to delete directories - that are not empty will fail. - - If the directory does not exist, the operation fails on the - service. By default, the exception is swallowed by the client. - To expose the exception, specify True for fail_not_exist. - - :param str share_name: - Name of existing share. - :param str directory_name: - Name of directory to delete, including the path to the parent - directory. - :param bool fail_not_exist: - Specify whether to throw an exception when the directory doesn't - exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if directory is deleted, False otherwise. - :rtype: bool - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'timeout': _int_to_str(timeout), - } - - if not fail_not_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_directory_properties(self, share_name, directory_name, timeout=None): - ''' - Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to an existing directory. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: properties for the specified directory within a directory object. - :rtype: :class:`~azure.storage.file.models.Directory` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_directory, [directory_name]) - - def get_directory_metadata(self, share_name, directory_name, timeout=None): - ''' - Returns all user-defined metadata for the specified directory. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the directory metadata name, value pairs. 
- :rtype: a dict mapping str to str - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_metadata) - - def set_directory_metadata(self, share_name, directory_name, metadata=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - directory. Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with no metadata dict. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param metadata: - A dict containing name-value pairs to associate with the directory - as metadata. Example: {'category':'test'} - :type metadata: A dict mapping str to str. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def list_directories_and_files(self, share_name, directory_name=None, - num_results=None, marker=None, timeout=None, - prefix=None): - - ''' - Returns a generator to list the directories and files under the specified share. - The generator will lazily follow the continuation tokens returned by - the service and stop when all directories and files have been returned or - num_results is reached. - - If num_results is specified and the share has more than that number of - files and directories, the generator will have a populated next_marker - field once it finishes. This marker can be used to create a new generator - if more results are desired. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param int num_results: - Specifies the maximum number of files to return, - including all directory elements. If the request does not specify - num_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting num_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str prefix: - List only the files and/or directories with the given prefix. 
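
A sketch of enumerating a directory with the generator documented above; File is assumed to be exported from the package's file.models module, as the imports in fileservice.py suggest, and the share, directory, and prefix names are placeholders::

    from azure.multiapi.storage.v2016_05_31.file import FileService
    from azure.multiapi.storage.v2016_05_31.file.models import File

    service = FileService(account_name='myaccount', account_key='<base64-account-key>')

    # Entries mix File and Directory objects; only files carry a content length.
    for entry in service.list_directories_and_files('myshare', 'mydir', prefix='report'):
        if isinstance(entry, File):
            print('file', entry.name, entry.properties.content_length)
        else:
            print('dir ', entry.name)
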
- ''' - operation_context = _OperationContext(location_lock=True) - args = (share_name, directory_name) - kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout, - '_context': operation_context, 'prefix': prefix} - - resp = self._list_directories_and_files(*args, **kwargs) - - return ListGenerator(resp, self._list_directories_and_files, args, kwargs) - - def _list_directories_and_files(self, share_name, directory_name=None, - marker=None, max_results=None, timeout=None, - prefix=None, _context=None): - - ''' - Returns a list of the directories and files under the specified share. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of files to return, - including all directory elements. If the request does not specify - max_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting max_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param int timeout: - The timeout parameter is expressed in seconds. - :param str prefix: - List only the files and/or directories with the given prefix. - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_directories_and_files, - operation_context=_context) - - def get_file_properties(self, share_name, directory_name, file_name, timeout=None): - ''' - Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. Returns an instance of :class:`.File` with - :class:`.FileProperties` and a metadata dict. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: a file object including properties and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'HEAD' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { 'timeout': _int_to_str(timeout)} - - return self._perform_request(request, _parse_file, [file_name]) - - def exists(self, share_name, directory_name=None, file_name=None, timeout=None): - ''' - Returns a boolean indicating whether the share exists if only share name is - given. If directory_name is specificed a boolean will be returned indicating - if the directory exists. If file_name is specified as well, a boolean will be - returned indicating if the file exists. 
- - :param str share_name: - Name of a share. - :param str directory_name: - The path to a directory. - :param str file_name: - Name of a file. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A boolean indicating whether the resource exists. - :rtype: bool - ''' - _validate_not_none('share_name', share_name) - try: - if file_name is not None: - self.get_file_properties(share_name, directory_name, file_name, timeout=timeout) - elif directory_name is not None: - self.get_directory_properties(share_name, directory_name, timeout=timeout) - else: - self.get_share_properties(share_name, timeout=timeout) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def resize_file(self, share_name, directory_name, - file_name, content_length, timeout=None): - ''' - Resizes a file to the specified size. If the specified byte - value is less than the current size of the file, then all - ranges above the specified byte value are cleared. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int content_length: - The length to resize the file to. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-content-length': _to_str(content_length) - } - - self._perform_request(request) - - def set_file_properties(self, share_name, directory_name, file_name, - content_settings, timeout=None): - ''' - Sets system properties on the file. If one property is set for the - content_settings, all properties will be overriden. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set the file properties. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('content_settings', content_settings) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = content_settings._to_headers() - - self._perform_request(request) - - def get_file_metadata(self, share_name, directory_name, file_name, timeout=None): - ''' - Returns all user-defined metadata for the specified file. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the file metadata name, value pairs. - :rtype: dict mapping str to str. 
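
A sketch combining exists() with the property and metadata getters documented above; all resource names are placeholders::

    from azure.multiapi.storage.v2016_05_31.file import FileService

    service = FileService(account_name='myaccount', account_key='<base64-account-key>')

    # exists() probes the share, directory, or file depending on the names passed.
    if service.exists('myshare', 'mydir', 'myfile.txt'):
        f = service.get_file_properties('myshare', 'mydir', 'myfile.txt')
        print(f.properties.content_length, f.metadata)
        metadata = service.get_file_metadata('myshare', 'mydir', 'myfile.txt')
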
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_metadata) - - def set_file_metadata(self, share_name, directory_name, - file_name, metadata=None, timeout=None): - ''' - Sets user-defined metadata for the specified file as one or more - name-value pairs. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the file. To remove all - metadata from the file, call this operation with no metadata headers. - :type metadata: dict mapping str to str - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def copy_file(self, share_name, directory_name, file_name, copy_source, - metadata=None, timeout=None): - ''' - Copies a file asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The File service copies files on a best-effort basis. - - If the destination file exists, it will be overwritten. The destination - file cannot be modified while the copy operation is in progress. - - :param str share_name: - Name of the destination share. The share must exist. - :param str directory_name: - Name of the destination directory. The directory must exist. - :param str file_name: - Name of the destination file. If the destination file exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param metadata: - Name-value pairs associated with the file as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination file. If one or more name-value - pairs are specified, the destination file is created with the specified - metadata, and the metadata is not copied from the source blob or file. - :type metadata: A dict mapping str to str. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Copy operation properties such as status, source, and ID. 
- :rtype: :class:`~azure.storage.file.models.CopyProperties` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('copy_source', copy_source) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { 'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-copy-source': _to_str(copy_source), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_properties, [FileProperties]).copy - - def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None): - ''' - Aborts a pending copy_file operation, and leaves a destination file - with zero length and full metadata. - - :param str share_name: - Name of destination share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of destination file. - :param str copy_id: - Copy identifier provided in the copy.id of the original - copy_file operation. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('copy_id', copy_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'copy', - 'copyid': _to_str(copy_id), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-copy-action': 'abort', - } - - self._perform_request(request) - - def delete_file(self, share_name, directory_name, file_name, timeout=None): - ''' - Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { 'timeout': _int_to_str(timeout)} - - self._perform_request(request) - - def create_file(self, share_name, directory_name, file_name, - content_length, content_settings=None, metadata=None, - timeout=None): - ''' - Creates a new file. - - See create_file_from_* for high level functions that handle the - creation and upload of large files with automatic chunking and - progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param int content_length: - Length of the file in bytes. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: a dict mapping str to str - :param int timeout: - The timeout parameter is expressed in seconds. 
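# Illustrative sketch (not part of the removed source): starting a server-side
# copy with copy_file and aborting it if it is still pending, as documented
# above. Assumes the `service` client from the earlier sketches; the source URL
# and names are placeholders.
source_url = 'https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken'
copy = service.copy_file('myshare', 'mydir', 'copied.bin', source_url)

if copy.status == 'pending':
    # Aborting leaves a zero-length destination file with full metadata,
    # so clean it up afterwards if it is not wanted.
    service.abort_copy_file('myshare', 'mydir', 'copied.bin', copy.id)
    service.delete_file('myshare', 'mydir', 'copied.bin')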
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { 'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-content-length': _to_str(content_length), - 'x-ms-type': 'file' - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - self._perform_request(request) - - def create_file_from_path(self, share_name, directory_name, file_name, - local_file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, timeout=None): - ''' - Creates a new azure file from a local file path, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str local_file_path: - Path of the local file to upload as the file content. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used for setting file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('local_file_path', local_file_path) - - count = path.getsize(local_file_path) - with open(local_file_path, 'rb') as stream: - self.create_file_from_stream( - share_name, directory_name, file_name, stream, - count, content_settings, metadata, validate_content, progress_callback, - max_connections, timeout) - - def create_file_from_text(self, share_name, directory_name, file_name, - text, encoding='utf-8', content_settings=None, - metadata=None, validate_content=False, timeout=None): - ''' - Creates a new file from str/unicode, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str text: - Text to upload to the file. - :param str encoding: - Python encoding to use to convert the text to bytes. 
- :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('text', text) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - self.create_file_from_bytes( - share_name, directory_name, file_name, text, count=len(text), - content_settings=content_settings, metadata=metadata, - validate_content=validate_content, timeout=timeout) - - def create_file_from_bytes( - self, share_name, directory_name, file_name, file, - index=0, count=None, content_settings=None, metadata=None, - validate_content=False, progress_callback=None, max_connections=2, - timeout=None): - ''' - Creates a new file from an array of bytes, or updates the content - of an existing file, with automatic chunking and progress - notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str file: - Content of file as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
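# Illustrative sketch (not part of the removed source): uploading with the
# chunking helpers documented above. Assumes the `service` client and the
# ContentSettings import from the earlier sketches; paths and names are
# placeholders.
def report_progress(current, total):
    # current/total are byte counts, per the progress_callback contract above.
    print('{0}/{1} bytes uploaded'.format(current, total))

service.create_file_from_path(
    'myshare', 'mydir', 'report.pdf', '/tmp/report.pdf',
    content_settings=ContentSettings(content_type='application/pdf'),
    validate_content=True,            # per-range MD5, mainly useful over plain http
    progress_callback=report_progress,
    max_connections=4)

# Small text payloads can skip the file system entirely.
service.create_file_from_text('myshare', 'mydir', 'notes.txt', u'hello world',
                              encoding='utf-8')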
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('file', file) - _validate_type_bytes('file', file) - - if index < 0: - raise TypeError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(file) - index - - stream = BytesIO(file) - stream.seek(index) - - self.create_file_from_stream( - share_name, directory_name, file_name, stream, count, - content_settings, metadata, validate_content, progress_callback, - max_connections, timeout) - - def create_file_from_stream( - self, share_name, directory_name, file_name, stream, count, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None): - ''' - Creates a new file from a file/stream, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param io.IOBase stream: - Opened file/stream to upload as the file content. - :param int count: - Number of bytes to read from the stream. This is required, a - file cannot be created if the count is unknown. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: a dict mapping str to str - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. Note that parallel upload - requires the stream to be seekable. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('stream', stream) - _validate_not_none('count', count) - - if count < 0: - raise TypeError(_ERROR_VALUE_NEGATIVE.format('count')) - - self.create_file( - share_name, - directory_name, - file_name, - count, - content_settings, - metadata, - timeout - ) - - _upload_file_chunks( - self, - share_name, - directory_name, - file_name, - count, - self.MAX_RANGE_SIZE, - stream, - max_connections, - progress_callback, - validate_content, - timeout - ) - - def _get_file(self, share_name, directory_name, file_name, - start_range=None, end_range=None, validate_content=False, - timeout=None, _context=None): - ''' - Downloads a file's content, metadata, and properties. You can specify a - range if you don't need to download the file in its entirety. If no range - is specified, the full file will be downloaded. 
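# Illustrative sketch (not part of the removed source): the bytes/stream upload
# variants documented above. create_file_from_stream requires an explicit byte
# count because the file is created at its full size before ranges are uploaded.
# Assumes the `service` client from the earlier sketches.
import io

payload = b'\x00' * 4096
service.create_file_from_bytes('myshare', 'mydir', 'blob.bin', payload,
                               index=1024, count=2048)    # upload bytes 1024-3071

stream = io.BytesIO(payload)                              # seekable, so parallel upload is allowed
service.create_file_from_stream('myshare', 'mydir', 'stream.bin', stream,
                                count=len(payload), max_connections=2)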
- - See get_file_to_* for high level functions that handle the download - of large files with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - When this is set to True and specified together with the Range header, - the service returns the MD5 hash for the range, as long as the range - is less than or equal to 4 MB in size. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A File with content, properties, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { 'timeout': _int_to_str(timeout)} - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - check_content_md5=validate_content) - - return self._perform_request(request, _parse_file, - [file_name, validate_content], - operation_context=_context) - - def get_file_to_path(self, share_name, directory_name, file_name, file_path, - open_mode='wb', start_range=None, end_range=None, - validate_content=False, progress_callback=None, - max_connections=2, timeout=None): - ''' - Downloads a file to a file path, with automatic chunking and progress - notifications. Returns an instance of File with properties and metadata. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param str file_path: - Path of file to write to. - :param str open_mode: - Mode to use when opening the file. Note that specifying append only - open_mode prevents parallel download. So, max_connections must be set - to 1 if this open_mode is used. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. 
Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A File with properties and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('file_path', file_path) - _validate_not_none('open_mode', open_mode) - - if max_connections > 1 and 'a' in open_mode: - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - with open(file_path, open_mode) as stream: - file = self.get_file_to_stream( - share_name, directory_name, file_name, stream, - start_range, end_range, validate_content, - progress_callback, max_connections, timeout) - - return file - - def get_file_to_stream( - self, share_name, directory_name, file_name, stream, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None): - ''' - Downloads a file to a stream, with automatic chunking and progress - notifications. Returns an instance of :class:`File` with properties - and metadata. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param io.IOBase stream: - Opened file/stream to write to. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. 
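# Illustrative sketch (not part of the removed source): downloading to a local
# path with get_file_to_path as documented above. 'wb' truncates any existing
# local file; an append mode ('ab') would require max_connections=1. Assumes
# the `service` client from the earlier sketches.
result = service.get_file_to_path('myshare', 'mydir', 'report.pdf',
                                  '/tmp/report.pdf',
                                  start_range=0, end_range=511,   # first 512 bytes only
                                  max_connections=2)
print(result.properties.content_length, result.properties.etag)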
- :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A File with properties and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('stream', stream) - - # If the user explicitly sets max_connections to 1, do a single shot download - if max_connections == 1: - file = self._get_file(share_name, - directory_name, - file_name, - start_range=start_range, - end_range=end_range, - validate_content=validate_content, - timeout=timeout) - - # Set the download size - download_size = file.properties.content_length - - # If max_connections is greater than 1, do the first get to establish the - # size of the file and get the first segment of data - else: - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. 
- first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE - - initial_request_start = start_range if start_range else 0 - - if end_range and end_range - start_range < first_get_size: - initial_request_end = end_range - else: - initial_request_end = initial_request_start + first_get_size - 1 - - # Send a context object to make sure we always retry to the initial location - operation_context = _OperationContext(location_lock=True) - try: - file = self._get_file(share_name, - directory_name, - file_name, - start_range=initial_request_start, - end_range=initial_request_end, - validate_content=validate_content, - timeout=timeout, - _context=operation_context) - - # Parse the total file size and adjust the download size if ranges - # were specified - file_size = _parse_length_from_content_range(file.properties.content_range) - if end_range: - # Use the end_range unless it is over the end of the file - download_size = min(file_size, end_range - start_range + 1) - elif start_range: - download_size = file_size - start_range - else: - download_size = file_size - except AzureHttpError as ex: - if not start_range and ex.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - file = self._get_file(share_name, - directory_name, - file_name, - validate_content=validate_content, - timeout=timeout, - _context=operation_context) - - # Set the download size to empty - download_size = 0 - else: - raise ex - - # Mark the first progress chunk. If the file is small or this is a single - # shot download, this is the only call - if progress_callback: - progress_callback(file.properties.content_length, download_size) - - # Write the content to the user stream - # Clear file content since output has been written to user stream - if file.content is not None: - stream.write(file.content) - file.content = None - - # If the file is small or single shot download was used, the download is - # complete at this point. If file size is large, use parallel download. - if file.properties.content_length != download_size: - # At this point would like to lock on something like the etag so that - # if the file is modified, we dont get a corrupted download. However, - # this feature is not yet available on the file service. 
- - end_file = file_size - if end_range: - # Use the end_range unless it is over the end of the file - end_file = min(file_size, end_range + 1) - - _download_file_chunks( - self, - share_name, - directory_name, - file_name, - download_size, - self.MAX_CHUNK_GET_SIZE, - first_get_size, - initial_request_end + 1, # start where the first download ended - end_file, - stream, - max_connections, - progress_callback, - validate_content, - timeout, - operation_context, - ) - - # Set the content length to the download size instead of the size of - # the last range - file.properties.content_length = download_size - - # Overwrite the content range to the user requested range - file.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, file_size) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - file.properties.content_md5 = None - - return file - - def get_file_to_bytes(self, share_name, directory_name, file_name, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None): - ''' - Downloads a file as an array of bytes, with automatic chunking and - progress notifications. Returns an instance of :class:`File` with - properties, metadata, and content. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. 
This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A File with properties, content, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - - stream = BytesIO() - file = self.get_file_to_stream( - share_name, - directory_name, - file_name, - stream, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - timeout) - - file.content = stream.getvalue() - return file - - def get_file_to_text( - self, share_name, directory_name, file_name, encoding='utf-8', - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None): - ''' - Downloads a file as unicode text, with automatic chunking and progress - notifications. Returns an instance of :class:`File` with properties, - metadata, and content. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param str encoding: - Python encoding to use when decoding the file data. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: callback function in format of func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. 
Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A File with properties, content, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('encoding', encoding) - - file = self.get_file_to_bytes( - share_name, - directory_name, - file_name, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - timeout) - - file.content = file.content.decode(encoding) - return file - - def update_range(self, share_name, directory_name, file_name, data, - start_range, end_range, validate_content=False, timeout=None): - ''' - Writes the bytes specified by the request body into the specified range. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param bytes data: - Content of the range. - :param int start_range: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('data', data) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'range', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-write': 'update', - } - _validate_and_format_range_headers( - request, start_range, end_range) - request.body = _get_data_bytes_only('data', data) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - self._perform_request(request) - - def clear_range(self, share_name, directory_name, file_name, start_range, - end_range, timeout=None): - ''' - Clears the specified range and releases the space used in storage for - that range. 
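# Illustrative sketch (not part of the removed source): reading a file back as
# text and patching a 512-byte range in place with update_range, as documented
# above. Ranges are inclusive, so 0-511 covers exactly 512 bytes and the body
# length must match the range length. Assumes the `service` client from the
# earlier sketches.
text_file = service.get_file_to_text('myshare', 'mydir', 'notes.txt', encoding='utf-8')
print(text_file.content)

patch = b'\xff' * 512
service.update_range('myshare', 'mydir', 'blob.bin', patch,
                     start_range=0, end_range=511, validate_content=True)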
- - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'range', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'Content-Length': '0', - 'x-ms-write': 'clear', - } - _validate_and_format_range_headers( - request, start_range, end_range) - - self._perform_request(request) - - def list_ranges(self, share_name, directory_name, file_name, - start_range=None, end_range=None, timeout=None): - ''' - Retrieves the valid ranges for a file. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Specifies the start offset of bytes over which to list ranges. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - Specifies the end offset of bytes over which to list ranges. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int timeout: - The timeout parameter is expressed in seconds. - :returns: a list of valid ranges - :rtype: a list of :class:`.FileRange` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'rangelist', - 'timeout': _int_to_str(timeout), - } - if start_range is not None: - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False) - - return self._perform_request(request, _convert_xml_to_ranges) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/file/models.py b/azure/multiapi/storage/v2016_05_31/file/models.py deleted file mode 100644 index d560a28..0000000 --- a/azure/multiapi/storage/v2016_05_31/file/models.py +++ /dev/null @@ -1,397 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from .._common_conversion import _to_str -class Share(object): - - ''' - File share class. - - :ivar str name: - The name of the share. - :ivar ShareProperties properties: - System properties for the share. - :ivar metadata: - A dict containing name-value pairs associated with the share as metadata. - This var is set to None unless the include=metadata param was included - for the list shares operation. If this parameter was specified but the - share has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict mapping str to str - ''' - - def __init__(self, name=None, props=None, metadata=None): - self.name = name - self.properties = props or ShareProperties() - self.metadata = metadata - - -class ShareProperties(object): - - ''' - File share's properties class. - - :ivar datetime last_modified: - A datetime object representing the last time the share was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int quote: - Returns the current share quota in GB. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.quota = None - -class Directory(object): - - ''' - Directory class. - - :ivar str name: - The name of the directory. - :ivar DirectoryProperties properties: - System properties for the directory. - :ivar metadata: - A dict containing name-value pairs associated with the directory as metadata. - This var is set to None unless the include=metadata param was included - for the list directory operation. If this parameter was specified but the - directory has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict mapping str to str - ''' - - def __init__(self, name=None, props=None, metadata=None): - self.name = name - self.properties = props or DirectoryProperties() - self.metadata = metadata - -class DirectoryProperties(object): - - ''' - File directory's properties class. - - :ivar datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - -class File(object): - - ''' - File class. - - :ivar str name: - The name of the file. - :ivar content: - File content. - :vartype content: str or bytes - :ivar FileProperties properties: - System properties for the file. - :ivar metadata: - A dict containing name-value pairs associated with the file as metadata. - This var is set to None unless the include=metadata param was included - for the list file operation. If this parameter was specified but the - file has no metadata, metadata will be set to an empty dictionary. 
- :vartype metadata: dict mapping str to str - ''' - - def __init__(self, name=None, content=None, props=None, metadata=None): - self.name = name - self.content = content - self.properties = props or FileProperties() - self.metadata = metadata - - -class FileProperties(object): - - ''' - File Properties. - - :ivar datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int content_length: - The length of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar ~azure.storage.file.models.ContentSettings content_settings: - Stores all the content settings for the file. - :ivar ~azure.storage.file.models.CopyProperties copy: - Stores all the copy properties for the file. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.content_length = None - self.content_range = None - self.content_settings = ContentSettings() - self.copy = CopyProperties() - - -class ContentSettings(object): - - ''' - Used to store the content settings of a file. - - :ivar str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If content_encoding has previously been set - for the file, that value is stored. - :ivar str content_language: - If content_language has previously been set - for the file, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :ivar str cache_control: - If cache_control has previously been set for - the file, that value is stored. - :ivar str content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - ''' - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None): - - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.cache_control = cache_control - self.content_md5 = content_md5 - - def _to_headers(self): - return { - 'x-ms-cache-control': _to_str(self.cache_control), - 'x-ms-content-type': _to_str(self.content_type), - 'x-ms-content-disposition': _to_str(self.content_disposition), - 'x-ms-content-md5': _to_str(self.content_md5), - 'x-ms-content-encoding': _to_str(self.content_encoding), - 'x-ms-content-language': _to_str(self.content_language), - } - - -class CopyProperties(object): - ''' - File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation using Set File Properties or - Put File. 
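# Illustrative sketch (not part of the removed source): the model classes above
# are populated on downloads, so content settings and copy state can be read
# straight off the returned File. Assumes the `service` client and uploads from
# the earlier sketches.
f = service.get_file_to_bytes('myshare', 'mydir', 'report.pdf')
settings = f.properties.content_settings
print(settings.content_type, settings.cache_control)
print(f.properties.copy.status)    # None unless the file was ever a copy destination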
- :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation using - Set File Properties or Put File. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - ''' - - def __init__(self): - self.id = None - self.source = None - self.status = None - self.progress = None - self.completion_time = None - self.status_description = None - - -class FileRange(object): - - ''' - File Range. - - :ivar int start: - Byte index for start of file range. - :ivar int end: - Byte index for end of file range. - ''' - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class FilePermissions(object): - - ''' - FilePermissions class to be used with - :func:`~azure.storage.file.fileservice.FileService.generate_file_shared_access_signature` API. - - :ivar FilePermissions FilePermissions.CREATE: - Create a new file or copy a file to a new file. - :ivar FilePermissions FilePermissions.DELETE: - Delete the file. - :ivar FilePermissions FilePermissions.READ: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :ivar FilePermissions FilePermissions.WRITE: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - ''' - def __init__(self, read=False, create=False, write=False, delete=False, - _str=None): - ''' - :param bool read: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :param bool create: - Create a new file or copy a file to a new file. - :param bool write: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - :param bool delete: - Delete the file. - :param str _str: - A string representing the permissions. 
-        '''
-
-        if not _str:
-            _str = ''
-        self.read = read or ('r' in _str)
-        self.create = create or ('c' in _str)
-        self.write = write or ('w' in _str)
-        self.delete = delete or ('d' in _str)
-
-    def __or__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __add__(self, other):
-        return FilePermissions(_str=str(self) + str(other))
-
-    def __str__(self):
-        return (('r' if self.read else '') +
-                ('c' if self.create else '') +
-                ('w' if self.write else '') +
-                ('d' if self.delete else ''))
-
-
-FilePermissions.CREATE = FilePermissions(create=True)
-FilePermissions.DELETE = FilePermissions(delete=True)
-FilePermissions.READ = FilePermissions(read=True)
-FilePermissions.WRITE = FilePermissions(write=True)
-
-
-class SharePermissions(object):
-
-    '''
-    SharePermissions class to be used with `azure.storage.file.FileService.generate_share_shared_access_signature`
-    method and for the AccessPolicies used with `azure.storage.file.FileService.set_share_acl`.
-
-    :ivar SharePermissions SharePermissions.DELETE:
-        Delete any file in the share.
-        Note: You cannot grant permissions to delete a share with a service SAS. Use
-        an account SAS instead.
-    :ivar SharePermissions SharePermissions.LIST:
-        List files and directories in the share.
-    :ivar SharePermissions SharePermissions.READ:
-        Read the content, properties or metadata of any file in the share. Use any
-        file in the share as the source of a copy operation.
-    :ivar SharePermissions SharePermissions.WRITE:
-        For any file in the share, create or write content, properties or metadata.
-        Resize the file. Use the file as the destination of a copy operation within
-        the same account.
-        Note: You cannot grant permissions to read or write share properties or
-        metadata with a service SAS. Use an account SAS instead.
-    '''
-    def __init__(self, read=False, write=False, delete=False, list=False,
-                 _str=None):
-        '''
-        :param bool read:
-            Read the content, properties or metadata of any file in the share. Use any
-            file in the share as the source of a copy operation.
-        :param bool write:
-            For any file in the share, create or write content, properties or metadata.
-            Resize the file. Use the file as the destination of a copy operation within
-            the same account.
-            Note: You cannot grant permissions to read or write share properties or
-            metadata with a service SAS. Use an account SAS instead.
-        :param bool delete:
-            Delete any file in the share.
-            Note: You cannot grant permissions to delete a share with a service SAS. Use
-            an account SAS instead.
-        :param bool list:
-            List files and directories in the share.
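# Illustrative sketch (not part of the removed source): composing permissions
# for a file-level SAS. FilePermissions instances combine with | or +, which
# concatenates their string forms. The keyword arguments passed to
# generate_file_shared_access_signature below (permission=, expiry=) are
# assumptions based on the legacy SDK and are not shown in this hunk. Assumes
# the `service` client from the earlier sketches.
from datetime import datetime, timedelta

perms = FilePermissions.READ | FilePermissions.WRITE       # -> 'rw'
sas_token = service.generate_file_shared_access_signature(
    'myshare', 'mydir', 'report.pdf',
    permission=perms,
    expiry=datetime.utcnow() + timedelta(hours=1))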
- :param str _str: - A string representing the permissions - ''' - - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - - def __or__(self, other): - return SharePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return SharePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - -SharePermissions.DELETE = SharePermissions(delete=True) -SharePermissions.LIST = SharePermissions(list=True) -SharePermissions.READ = SharePermissions(read=True) -SharePermissions.WRITE = SharePermissions(write=True) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/models.py b/azure/multiapi/storage/v2016_05_31/models.py deleted file mode 100644 index b3d9d9d..0000000 --- a/azure/multiapi/storage/v2016_05_31/models.py +++ /dev/null @@ -1,632 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -import sys -if sys.version_info < (3,): - from collections import Iterable - _unicode_type = unicode -else: - from collections.abc import Iterable - _unicode_type = str - -from ._error import ( - _validate_not_none, - _ERROR_UNKNOWN_KEY_WRAP_ALGORITHM -) -from cryptography.hazmat.primitives.keywrap import( - aes_key_wrap, - aes_key_unwrap, -) -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key -from cryptography.hazmat.primitives.asymmetric.padding import ( - OAEP, - MGF1, -) -from cryptography.hazmat.primitives.hashes import SHA1 -from os import urandom - -class _HeaderDict(dict): - - def __getitem__(self, index): - return super(_HeaderDict, self).__getitem__(index.lower()) - -class _list(list): - '''Used so that additional properties can be set on the return list''' - pass - -class _dict(dict): - '''Used so that additional properties can be set on the return dictionary''' - pass - -class _OperationContext(object): - ''' - Contains information that lasts the lifetime of an operation. This operation - may span multiple calls to the Azure service. - - :ivar bool location_lock: - Whether the location should be locked for this operation. - :ivar str location: - The location to lock to. - ''' - def __init__(self, location_lock=False): - self.location_lock = location_lock - self.host_location = None - -class ListGenerator(Iterable): - ''' - A generator object used to list storage resources. The generator will lazily - follow the continuation tokens returned by the service and stop when all - resources have been returned or max_results is reached. 
- - If max_results is specified and the account has more than that number of - resources, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - ''' - def __init__(self, resources, list_method, list_args, list_kwargs): - self.items = resources - self.next_marker = resources.next_marker - - self._list_method = list_method - self._list_args = list_args - self._list_kwargs = list_kwargs - - def __iter__(self): - # return results - for i in self.items: - yield i - - while True: - # if no more results on the service, return - if not self.next_marker: - break - - # update the marker args - self._list_kwargs['marker'] = self.next_marker - - # handle max results, if present - max_results = self._list_kwargs.get('max_results') - if max_results is not None: - max_results = max_results - len(self.items) - - # if we've reached max_results, return - # else, update the max_results arg - if max_results <= 0: - break - else: - self._list_kwargs['max_results'] = max_results - - # get the next segment - resources = self._list_method(*self._list_args, **self._list_kwargs) - self.items = resources - self.next_marker = resources.next_marker - - # return results - for i in self.items: - yield i - - -class RetryContext(object): - ''' - Contains the request and response information that can be used to determine - whether and how to retry. This context is stored across retries and may be - used to store other information relevant to the retry strategy. - - :ivar :class:`~azure.storage._http.HTTPRequest` request: - The request sent to the storage service. - :ivar :class:`~azure.storage._http.HTTPResponse` response: - The response returned by the storage service. - :ivar LocationMode location_mode: - The location the request was sent to. - ''' - def __init__(self): - self.request = None - self.response = None - self.location_mode = None - -class LocationMode(object): - ''' - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - ''' - - PRIMARY = 'primary' - ''' Requests should be sent to the primary location. ''' - - SECONDARY = 'secondary' - ''' Requests should be sent to the secondary location, if possible. ''' - - -class RetentionPolicy(object): - - ''' - By default, Storage Analytics will not delete any logging or metrics data. Blobs - and table entities will continue to be written until the shared 20TB limit is - reached. Once the 20TB limit is reached, Storage Analytics will stop writing - new data and will not resume until free space is available. This 20TB limit - is independent of the total limit for your storage account. - - There are two ways to delete Storage Analytics data: by manually making deletion - requests or by setting a data retention policy. Manual requests to delete Storage - Analytics data are billable, but delete requests resulting from a retention policy - are not billable. - ''' - - def __init__(self, enabled=False, days=None): - ''' - :param bool enabled: - Indicates whether a retention policy is enabled for the - storage service. If disabled, logging and metrics data will be retained - infinitely by the service unless explicitly deleted. - :param int days: - Required if enabled is true. Indicates the number of - days that metrics or logging data should be retained. All data older - than this value will be deleted. 
The minimum value you can specify is 1; - the largest value is 365 (one year). - ''' - _validate_not_none("enabled", enabled) - if enabled: - _validate_not_none("days", days) - - self.enabled = enabled - self.days = days - - -class Logging(object): - - ''' - Storage Analytics logs detailed information about successful and failed requests - to a storage service. This information can be used to monitor individual requests - and to diagnose issues with a storage service. Requests are logged on a best-effort - basis. - - All logs are stored in block blobs in a container named $logs, which is - automatically created when Storage Analytics is enabled for a storage account. - The $logs container is located in the blob namespace of the storage account. - This container cannot be deleted once Storage Analytics has been enabled, though - its contents can be deleted. - - For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx - ''' - - def __init__(self, delete=False, read=False, write=False, - retention_policy=None): - ''' - :param bool delete: - Indicates whether all delete requests should be logged. - :param bool read: - Indicates whether all read requests should be logged. - :param bool write: - Indicates whether all write requests should be logged. - :param RetentionPolicy retention_policy: - The retention policy for the metrics. - ''' - _validate_not_none("read", read) - _validate_not_none("write", write) - _validate_not_none("delete", delete) - - self.version = u'1.0' - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy if retention_policy else RetentionPolicy() - - -class Metrics(object): - - ''' - Metrics include aggregated transaction statistics and capacity data about requests - to a storage service. Transactions are reported at both the API operation level - as well as at the storage service level, and capacity is reported at the storage - service level. Metrics data can be used to analyze storage service usage, diagnose - issues with requests made against the storage service, and to improve the - performance of applications that use a service. - - For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx - ''' - - def __init__(self, enabled=False, include_apis=None, - retention_policy=None): - ''' - :param bool enabled: - Indicates whether metrics are enabled for - the service. - :param bool include_apis: - Required if enabled is True. Indicates whether metrics - should generate summary statistics for called API operations. - :param RetentionPolicy retention_policy: - The retention policy for the metrics. - ''' - _validate_not_none("enabled", enabled) - if enabled: - _validate_not_none("include_apis", include_apis) - - self.version = u'1.0' - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy if retention_policy else RetentionPolicy() - - -class CorsRule(object): - - ''' - CORS is an HTTP feature that enables a web application running under one domain - to access resources in another domain. Web browsers implement a security - restriction known as same-origin policy that prevents a web page from calling - APIs in a different domain; CORS provides a secure way to allow one domain - (the origin domain) to call APIs in another domain. 
- - For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx - ''' - - def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0, - exposed_headers=None, allowed_headers=None): - ''' - :param allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :type allowed_origins: list of str - :param allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :type allowed_methods: list of str - :param int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - :param exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :type exposed_headers: list of str - :param allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :type allowed_headers: list of str - ''' - _validate_not_none("allowed_origins", allowed_origins) - _validate_not_none("allowed_methods", allowed_methods) - _validate_not_none("max_age_in_seconds", max_age_in_seconds) - - self.allowed_origins = allowed_origins if allowed_origins else list() - self.allowed_methods = allowed_methods if allowed_methods else list() - self.max_age_in_seconds = max_age_in_seconds - self.exposed_headers = exposed_headers if exposed_headers else list() - self.allowed_headers = allowed_headers if allowed_headers else list() - - -class ServiceProperties(object): - ''' - Returned by get_*_service_properties functions. Contains the properties of a - storage service, including Analytics and CORS rules. - - Azure Storage Analytics performs logging and provides metrics data for a storage - account. You can use this data to trace requests, analyze usage trends, and - diagnose issues with your storage account. To use Storage Analytics, you must - enable it individually for each service you want to monitor. - - The aggregated data is stored in a well-known blob (for logging) and in well-known - tables (for metrics), which may be accessed using the Blob service and Table - service APIs. - - For an in-depth guide on using Storage Analytics and other tools to identify, - diagnose, and troubleshoot Azure Storage-related issues, see - http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/ - - For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx - ''' - - pass - - -class ServiceStats(object): - ''' - Returned by get_*_service_stats functions. Contains statistics related to - replication for the given service. It is only available when read-access - geo-redundant replication is enabled for the storage account. - - :ivar GeoReplication geo_replication: - An object containing statistics related to replication for the given service. - ''' - pass - - -class GeoReplication(object): - ''' - Contains statistics related to replication for the given service. - - :ivar str status: - The status of the secondary location. 
Possible values are: - live: Indicates that the secondary location is active and operational. - bootstrap: Indicates initial synchronization from the primary location - to the secondary location is in progress. This typically occurs - when replication is first enabled. - unavailable: Indicates that the secondary location is temporarily - unavailable. - :ivar date last_sync_time: - A GMT date value, to the second. All primary writes preceding this value - are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for - reads. The value may be empty if LastSyncTime is not available. This can - happen if the replication status is bootstrap or unavailable. Although - geo-replication is continuously enabled, the LastSyncTime result may - reflect a cached value from the service that is refreshed every few minutes. - ''' - pass - - -class AccessPolicy(object): - - ''' - Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your table resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - ''' - - def __init__(self, permission=None, expiry=None, start=None): - ''' - :param str permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - ''' - self.start = start - self.expiry = expiry - self.permission = permission - - -class Protocol(object): - ''' - Specifies the protocol permitted for a SAS token. Note that HTTP only is - not allowed. 
- ''' - - HTTPS = 'https' - ''' Allow HTTPS requests only. ''' - - HTTPS_HTTP = 'https,http' - ''' Allow HTTP and HTTPS requests. ''' - -class ResourceTypes(object): - - ''' - Specifies the resource types that are accessible with the account SAS. - - :ivar ResourceTypes ResourceTypes.CONTAINER: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Table, Create/Delete Share, - List Blobs/Files and Directories) - :ivar ResourceTypes ResourceTypes.OBJECT: - Access to object-level APIs for blobs, queue messages, table entities, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - :ivar ResourceTypes ResourceTypes.SERVICE: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Tables/Shares) - ''' - def __init__(self, service=False, container=False, object=False, _str=None): - ''' - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Tables/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Table, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, table entities, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - :param str _str: - A string representing the resource types. - ''' - if not _str: - _str = '' - self.service = service or ('s' in _str) - self.container = container or ('c' in _str) - self.object = object or ('o' in _str) - - def __or__(self, other): - return ResourceTypes(_str=str(self) + str(other)) - - def __add__(self, other): - return ResourceTypes(_str=str(self) + str(other)) - - def __str__(self): - return (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - -ResourceTypes.SERVICE = ResourceTypes(service=True) -ResourceTypes.CONTAINER = ResourceTypes(container=True) -ResourceTypes.OBJECT = ResourceTypes(object=True) - - -class Services(object): - - ''' - Specifies the services accessible with the account SAS. - - :ivar Services Services.BLOB: The blob service. - :ivar Services Services.FILE: The file service - :ivar Services Services.QUEUE: The queue service. - :ivar Services Services.TABLE: The table service - ''' - def __init__(self, blob=False, queue=False, table=False, file=False, _str=None): - ''' - :param bool blob: - Access to any blob service, for example, the `.BlockBlobService` - :param bool queue: - Access to the `.QueueService` - :param bool table: - Access to the `.TableService` - :param bool file: - Access to the `.FileService` - :param str _str: - A string representing the services. 
- ''' - if not _str: - _str = '' - self.blob = blob or ('b' in _str) - self.queue = queue or ('q' in _str) - self.table = table or ('t' in _str) - self.file = file or ('f' in _str) - - def __or__(self, other): - return Services(_str=str(self) + str(other)) - - def __add__(self, other): - return Services(_str=str(self) + str(other)) - - def __str__(self): - return (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('t' if self.table else '') + - ('f' if self.file else '')) - -Services.BLOB = Services(blob=True) -Services.QUEUE = Services(queue=True) -Services.TABLE = Services(table=True) -Services.FILE = Services(file=True) - - -class AccountPermissions(object): - - ''' - :class:`~ResourceTypes` class to be used with generate_shared_access_signature - method and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :ivar AccountPermissions AccountPermissions.ADD: - Valid for the following Object resource types only: queue messages, table - entities, and append blobs. - :ivar AccountPermissions AccountPermissions.CREATE: - Valid for the following Object resource types only: blobs and files. Users - can create new blobs or files, but may not overwrite existing blobs or files. - :ivar AccountPermissions AccountPermissions.DELETE: - Valid for Container and Object resource types, except for queue messages. - :ivar AccountPermissions AccountPermissions.LIST: - Valid for Service and Container resource types only. - :ivar AccountPermissions AccountPermissions.PROCESS: - Valid for the following Object resource type only: queue messages. - :ivar AccountPermissions AccountPermissions.READ: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :ivar AccountPermissions AccountPermissions.UPDATE: - Valid for the following Object resource types only: queue messages and table - entities. - :ivar AccountPermissions AccountPermissions.WRITE: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - ''' - def __init__(self, read=False, write=False, delete=False, list=False, - add=False, create=False, update=False, process=False, _str=None): - ''' - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, - table entities, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages and - table entities. - :param bool process: - Valid for the following Object resource type only: queue messages. - :param str _str: - A string representing the permissions. 
- ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - self.add = add or ('a' in _str) - self.create = create or ('c' in _str) - self.update = update or ('u' in _str) - self.process = process or ('p' in _str) - - def __or__(self, other): - return ResourceTypes(_str=str(self) + str(other)) - - def __add__(self, other): - return ResourceTypes(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '')) - -AccountPermissions.READ = AccountPermissions(read=True) -AccountPermissions.WRITE = AccountPermissions(write=True) -AccountPermissions.DELETE = AccountPermissions(delete=True) -AccountPermissions.LIST = AccountPermissions(list=True) -AccountPermissions.ADD = AccountPermissions(add=True) -AccountPermissions.CREATE = AccountPermissions(create=True) -AccountPermissions.UPDATE = AccountPermissions(update=True) -AccountPermissions.PROCESS = AccountPermissions(process=True) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/queue/__init__.py b/azure/multiapi/storage/v2016_05_31/queue/__init__.py deleted file mode 100644 index cae1e38..0000000 --- a/azure/multiapi/storage/v2016_05_31/queue/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from .models import ( - Queue, - QueueMessage, - QueuePermissions, - QueueMessageFormat, -) - -from .queueservice import QueueService diff --git a/azure/multiapi/storage/v2016_05_31/queue/_deserialization.py b/azure/multiapi/storage/v2016_05_31/queue/_deserialization.py deleted file mode 100644 index 2549ae4..0000000 --- a/azure/multiapi/storage/v2016_05_31/queue/_deserialization.py +++ /dev/null @@ -1,153 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
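The permission and resource helpers shown above round-trip through their single-letter string form and can be combined with | or +. A minimal sketch of assembling the arguments for an account-level SAS, assuming a release that still ships the v2016_05_31 package:

from azure.multiapi.storage.v2016_05_31.models import (
    AccountPermissions,
    ResourceTypes,
    Services,
)

# ResourceTypes and Services compose cleanly; str() renders the flag letters.
resource_types = ResourceTypes.SERVICE | ResourceTypes.CONTAINER   # str(...) == 'sc'
services = Services.BLOB | Services.QUEUE                          # str(...) == 'bq'

# AccountPermissions.__or__/__add__ above return a ResourceTypes instance, so account
# permissions are more reliably built from the string form.
permission = AccountPermissions(_str='rl')                          # read + list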
-#-------------------------------------------------------------------------- -from dateutil import parser -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from .models import ( - Queue, - QueueMessage, -) -from ..models import ( - _list, -) -from .._deserialization import ( - _int_to_str, - _parse_metadata, -) -from ._encryption import ( - _decrypt_queue_message, -) - -def _parse_metadata_and_message_count(response): - ''' - Extracts approximate messages count header. - ''' - metadata = _parse_metadata(response) - metadata.approximate_message_count = _int_to_str(response.headers.get('x-ms-approximate-messages-count')) - - return metadata - -def _parse_queue_message_from_headers(response): - ''' - Extracts pop receipt and time next visible from headers. - ''' - message = QueueMessage() - message.pop_receipt = response.headers.get('x-ms-popreceipt') - message.time_next_visible = parser.parse(response.headers.get('x-ms-time-next-visible')) - - return message - -def _convert_xml_to_queues(response): - ''' - - - string-value - string-value - int-value - - - string-value - - value - - - - - ''' - if response is None or response.body is None: - return None - - queues = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(queues, 'next_marker', next_marker) - - queues_element = list_element.find('Queues') - - for queue_element in queues_element.findall('Queue'): - # Name element - queue = Queue() - queue.name = queue_element.findtext('Name') - - # Metadata - metadata_root_element = queue_element.find('Metadata') - if metadata_root_element is not None: - queue.metadata = dict() - for metadata_element in metadata_root_element: - queue.metadata[metadata_element.tag] = metadata_element.text - - # Add queue to list - queues.append(queue) - - return queues - -def _convert_xml_to_queue_messages(response, decode_function, require_encryption, key_encryption_key, resolver, content=None): - ''' - - - - string-message-id - insertion-time - expiration-time - opaque-string-receipt-data - time-next-visible - integer - message-body - - - ''' - if response is None or response.body is None: - return None - - messages = list() - list_element = ETree.fromstring(response.body) - - for message_element in list_element.findall('QueueMessage'): - message = QueueMessage() - - message.id = message_element.findtext('MessageId') - - dequeue_count = message_element.findtext('DequeueCount') - if dequeue_count is not None: - message.dequeue_count = _int_to_str(dequeue_count) - - # content is not returned for put_message - if content is not None: - message.content = content - else: - message.content = message_element.findtext('MessageText') - if (key_encryption_key is not None) or (resolver is not None): - message.content = _decrypt_queue_message(message.content, require_encryption, - key_encryption_key, resolver) - message.content = decode_function(message.content) - - message.insertion_time = parser.parse(message_element.findtext('InsertionTime')) - message.expiration_time = parser.parse(message_element.findtext('ExpirationTime')) - - message.pop_receipt = message_element.findtext('PopReceipt') - - time_next_visible = message_element.find('TimeNextVisible') - if time_next_visible is not None: - message.time_next_visible = parser.parse(time_next_visible.text) - - # Add message to list - messages.append(message) - - return messages \ No newline at end of file diff 
--git a/azure/multiapi/storage/v2016_05_31/queue/_encryption.py b/azure/multiapi/storage/v2016_05_31/queue/_encryption.py deleted file mode 100644 index 7de9cd3..0000000 --- a/azure/multiapi/storage/v2016_05_31/queue/_encryption.py +++ /dev/null @@ -1,173 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -from azure.common import ( - AzureException, -) -from .._constants import ( - _ENCRYPTION_PROTOCOL_V1, -) -from .._encryption import ( - _generate_encryption_data_dict, - _dict_to_encryption_data, - _generate_AES_CBC_cipher, - _validate_and_unwrap_cek, - _EncryptionAlgorithm, -) -from json import ( - dumps, - loads, -) -from base64 import( - b64encode, - b64decode, -) -from .._error import( - _ERROR_UNSUPPORTED_ENCRYPTION_VERSION, - _ERROR_DECRYPTION_FAILURE, - _ERROR_DATA_NOT_ENCRYPTED, - _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM, - _validate_not_none, - _validate_key_encryption_key_wrap, - _validate_key_encryption_key_unwrap, - _validate_encryption_protocol_version, - _validate_kek_id, -) -from .._common_conversion import ( - _encode_base64, - _decode_base64_to_bytes -) -from cryptography.hazmat.primitives.padding import PKCS7 -import os - -def _encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {} - queue_message['EncryptedMessageContents'] = _encode_base64(encrypted_data) - queue_message['EncryptionData'] = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - - return dumps(queue_message) - -def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError) as e: - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED) - else: - return message - try: - return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as e: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - -def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above. - :return: The decrypted plaintext. 
- :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if not ( _EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - #decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - #unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/queue/_error.py b/azure/multiapi/storage/v2016_05_31/queue/_error.py deleted file mode 100644 index 3b599f9..0000000 --- a/azure/multiapi/storage/v2016_05_31/queue/_error.py +++ /dev/null @@ -1,33 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -import sys -from .._error import ( - _validate_type_bytes, -) - -_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.' -_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.' -_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.' - -def _validate_message_type_text(param): - if sys.version_info < (3,): - if not isinstance(param, unicode): - raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE) - else: - if not isinstance(param, str): - raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR) - -def _validate_message_type_bytes(param): - _validate_type_bytes('message', param) diff --git a/azure/multiapi/storage/v2016_05_31/queue/_serialization.py b/azure/multiapi/storage/v2016_05_31/queue/_serialization.py deleted file mode 100644 index 874501a..0000000 --- a/azure/multiapi/storage/v2016_05_31/queue/_serialization.py +++ /dev/null @@ -1,81 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
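The key_encryption_key object described in the _encryption.py docstrings above is duck-typed: it only needs wrap_key, get_key_wrap_algorithm and get_kid (plus unwrap_key for reads). A minimal local sketch using the same AES key-wrap primitives the package already depends on; LocalKeyWrapper and its kid value are illustrative names, not part of the library:

import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap


class LocalKeyWrapper(object):
    # Holds a random 256-bit key-encryption-key in memory; purely illustrative.

    def __init__(self, kid='local:kek-1'):
        self.kid = kid
        self.kek = os.urandom(32)

    def wrap_key(self, key):
        # Wrap the content-encryption-key generated by the client.
        return aes_key_wrap(self.kek, key, default_backend())

    def unwrap_key(self, key, algorithm):
        # 'algorithm' echoes get_key_wrap_algorithm(); only AES key wrap is handled here.
        return aes_key_unwrap(self.kek, key, default_backend())

    def get_key_wrap_algorithm(self):
        return 'A256KW'

    def get_kid(self):
        return self.kid

An instance of such an object can then be assigned to a service client's key_encryption_key (and returned from a key_resolver_function) to enable client-side message encryption.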
-#-------------------------------------------------------------------------- -import sys -if sys.version_info >= (3,): - from io import BytesIO -else: - try: - from cStringIO import StringIO as BytesIO - except: - from StringIO import StringIO as BytesIO - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from xml.sax.saxutils import escape as xml_escape -from .._common_conversion import ( - _str, -) -from ._encryption import ( - _encrypt_queue_message, -) - -def _get_path(queue_name=None, include_messages=None, message_id=None): - ''' - Creates the path to access a queue resource. - - queue_name: - Name of queue. - include_messages: - Whether or not to include messages. - message_id: - Message id. - ''' - if queue_name and include_messages and message_id: - return '/{0}/messages/{1}'.format(_str(queue_name), message_id) - if queue_name and include_messages: - return '/{0}/messages'.format(_str(queue_name)) - elif queue_name: - return '/{0}'.format(_str(queue_name)) - else: - return '/' - - -def _convert_queue_message_xml(message_text, encode_function, key_encryption_key): - ''' - - - - - ''' - queue_message_element = ETree.Element('QueueMessage'); - - # Enabled - message_text = encode_function(message_text) - if key_encryption_key is not None: - message_text = _encrypt_queue_message(message_text, key_encryption_key) - ETree.SubElement(queue_message_element, 'MessageText').text = message_text - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - output = stream.getvalue() - finally: - stream.close() - - return output diff --git a/azure/multiapi/storage/v2016_05_31/queue/models.py b/azure/multiapi/storage/v2016_05_31/queue/models.py deleted file mode 100644 index ff9c250..0000000 --- a/azure/multiapi/storage/v2016_05_31/queue/models.py +++ /dev/null @@ -1,246 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from xml.sax.saxutils import escape as xml_escape -from xml.sax.saxutils import unescape as xml_unescape -from base64 import ( - b64encode, - b64decode, -) -from ._error import ( - _validate_message_type_bytes, - _validate_message_type_text, - _ERROR_MESSAGE_NOT_BASE64, -) - -class Queue(object): - - ''' - Queue class. - - :ivar str name: - The name of the queue. - :ivar metadata: - A dict containing name-value pairs associated with the queue as metadata. - This var is set to None unless the include=metadata param was included - for the list queues operation. If this parameter was specified but the - queue has no metadata, metadata will be set to an empty dictionary. 
- :vartype metadata: dict mapping str to str - ''' - - def __init__(self): - self.name = None - self.metadata = None - - -class QueueMessage(object): - ''' - Queue message class. - - :ivar str id: - A GUID value assigned to the message by the Queue service that - identifies the message in the queue. This value may be used together - with the value of pop_receipt to delete a message from the queue after - it has been retrieved with the get messages operation. - :ivar date insertion_time: - A UTC date value representing the time the messages was inserted. - :ivar date expiration_time: - A UTC date value representing the time the message expires. - :ivar int dequeue_count: - Begins with a value of 1 the first time the message is dequeued. This - value is incremented each time the message is subsequently dequeued. - :ivar obj content: - The message content. Type is determined by the decode_function set on - the service. Default is str. - :ivar str pop_receipt: - A receipt str which can be used together with the message_id element to - delete a message from the queue after it has been retrieved with the get - messages operation. Only returned by get messages operations. Set to - None for peek messages. - :ivar date time_next_visible: - A UTC date value representing the time the message will next be visible. - Only returned by get messages operations. Set to None for peek messages. - ''' - - def __init__(self): - self.id = None - self.insertion_time = None - self.expiration_time = None - self.dequeue_count = None - self.content = None - self.pop_receipt = None - self.time_next_visible = None - - -class QueueMessageFormat: - ''' - Encoding and decoding methods which can be used to modify how the queue service - encodes and decodes queue messages. Set these to queueservice.encode_function - and queueservice.decode_function to modify the behavior. The defaults are - text_xmlencode and text_xmldecode, respectively. - ''' - - @staticmethod - def text_base64encode(data): - ''' - Base64 encode unicode text. - - :param str data: String to encode. - :return: Base64 encoded string. - :rtype: str - ''' - _validate_message_type_text(data) - return b64encode(data.encode('utf-8')).decode('utf-8') - - @staticmethod - def text_base64decode(data): - ''' - Base64 decode to unicode text. - - :param str data: String data to decode to unicode. - :return: Base64 decoded string. - :rtype: str - ''' - try: - return b64decode(data.encode('utf-8')).decode('utf-8') - except (ValueError, TypeError): - # ValueError for Python 3, TypeError for Python 2 - raise ValueError(_ERROR_MESSAGE_NOT_BASE64) - - @staticmethod - def binary_base64encode(data): - ''' - Base64 encode byte strings. - - :param str data: Binary string to encode. - :return: Base64 encoded data. - :rtype: str - ''' - _validate_message_type_bytes(data) - return b64encode(data).decode('utf-8') - - @staticmethod - def binary_base64decode(data): - ''' - Base64 decode to byte string. - - :param str data: Data to decode to a byte string. - :return: Base64 decoded data. - :rtype: str - ''' - try: - return b64decode(data.encode('utf-8')) - except (ValueError, TypeError): - # ValueError for Python 3, TypeError for Python 2 - raise ValueError(_ERROR_MESSAGE_NOT_BASE64) - - @staticmethod - def text_xmlencode(data): - ''' - XML encode unicode text. - - :param str data: Unicode string to encode - :return: XML encoded data. 
- :rtype: str - ''' - _validate_message_type_text(data) - return xml_escape(data) - - @staticmethod - def text_xmldecode(data): - ''' - XML decode to unicode text. - - :param str data: Data to decode to unicode. - :return: XML decoded data. - :rtype: str - ''' - return xml_unescape(data) - - @staticmethod - def noencode(data): - ''' - Do no encoding. - - :param str data: Data. - :return: The data passed in is returned unmodified. - :rtype: str - ''' - return data - - @staticmethod - def nodecode(data): - ''' - Do no decoding. - - :param str data: Data. - :return: The data passed in is returned unmodified. - :rtype: str - ''' - return data - - -class QueuePermissions(object): - - ''' - QueuePermissions class to be used with :func:`~azure.storage.queue.queueservice.QueueService.generate_queue_shared_access_signature` - method and for the AccessPolicies used with :func:`~azure.storage.queue.queueservice.QueueService.set_queue_acl`. - - :ivar QueuePermissions QueuePermissions.READ: - Read metadata and properties, including message count. Peek at messages. - :ivar QueuePermissions QueuePermissions.ADD: - Add messages to the queue. - :ivar QueuePermissions QueuePermissions.UPDATE: - Update messages in the queue. Note: Use the Process permission with - Update so you can first get the message you want to update. - :ivar QueuePermissions QueuePermissions.PROCESS: Delete entities. - Get and delete messages from the queue. - ''' - def __init__(self, read=False, add=False, update=False, process=False, _str=None): - ''' - :param bool read: - Read metadata and properties, including message count. Peek at messages. - :param bool add: - Add messages to the queue. - :param bool update: - Update messages in the queue. Note: Use the Process permission with - Update so you can first get the message you want to update. - :param bool process: - Get and delete messages from the queue. - :param str _str: - A string representing the permissions. - ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.add = add or ('a' in _str) - self.update = update or ('u' in _str) - self.process = process or ('p' in _str) - - def __or__(self, other): - return QueuePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return QueuePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('a' if self.add else '') + - ('u' if self.update else '') + - ('p' if self.process else '')) - -QueuePermissions.READ = QueuePermissions(read=True) -QueuePermissions.ADD = QueuePermissions(add=True) -QueuePermissions.UPDATE = QueuePermissions(update=True) -QueuePermissions.PROCESS = QueuePermissions(process=True) diff --git a/azure/multiapi/storage/v2016_05_31/queue/queueservice.py b/azure/multiapi/storage/v2016_05_31/queue/queueservice.py deleted file mode 100644 index c33d1fe..0000000 --- a/azure/multiapi/storage/v2016_05_31/queue/queueservice.py +++ /dev/null @@ -1,990 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
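QueueMessageFormat above is meant to be swapped in per client instance, as its docstring notes. A short sketch of moving a client from the default XML escaping to base64 (account name and key are placeholders), assuming the v2016_05_31 package is installed:

from azure.multiapi.storage.v2016_05_31.queue import QueueService, QueueMessageFormat

queue_service = QueueService(account_name='account', account_key='key')

# Base64 keeps arbitrary text (and, with the binary_* variants, raw bytes) queue-safe.
queue_service.encode_function = QueueMessageFormat.text_base64encode
queue_service.decode_function = QueueMessageFormat.text_base64decode

# The helpers also work stand-alone.
assert QueueMessageFormat.text_base64decode(
    QueueMessageFormat.text_base64encode(u'hello')) == u'hello'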
-# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from azure.common import ( - AzureConflictHttpError, - AzureHttpError, -) -from .._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from .._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _ERROR_CONFLICT, - _ERROR_STORAGE_MISSING_INFO, - _validate_access_policies, - _validate_encryption_required, - _validate_decryption_required, -) -from .._serialization import ( - _get_request_body, - _add_metadata_headers, -) -from .._common_conversion import ( - _int_to_str, - _to_str, -) -from .._http import ( - HTTPRequest, -) -from ..models import ( - Services, - ListGenerator, - _OperationContext, -) -from .models import ( - QueueMessageFormat, -) -from .._auth import ( - _StorageSASAuthentication, - _StorageSharedKeyAuthentication, -) -from .._connection import _ServiceParameters -from .._serialization import ( - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, -) -from .._deserialization import ( - _convert_xml_to_service_properties, - _convert_xml_to_signed_identifiers, - _convert_xml_to_service_stats, -) -from ._serialization import ( - _convert_queue_message_xml, - _get_path, -) -from ._deserialization import ( - _convert_xml_to_queues, - _convert_xml_to_queue_messages, - _parse_queue_message_from_headers, - _parse_metadata_and_message_count, -) -from ..sharedaccesssignature import ( - SharedAccessSignature, -) -from ..storageclient import StorageClient - - -_HTTP_RESPONSE_NO_CONTENT = 204 - -class QueueService(StorageClient): - - ''' - This is the main class managing queue resources. - - The Queue service stores messages. A queue can contain an unlimited number of - messages, each of which can be up to 64KB in size. Messages are generally added - to the end of the queue and retrieved from the front of the queue, although - first in, first out (FIFO) behavior is not guaranteed. - - :ivar function(data) encode_function: - A function used to encode queue messages. Takes as - a parameter the data passed to the put_message API and returns the encoded - message. Defaults to take text and xml encode, but bytes and other - encodings can be used. For example, base64 may be preferable for developing - across multiple Azure Storage libraries in different languages. See the - :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 and - no encoding methods as well as binary equivalents. - :ivar function(data) decode_function: - A function used to encode decode messages. Takes as - a parameter the data returned by the get_messages and peek_messages APIs and - returns the decoded message. Defaults to return text and xml decode, but - bytes and other decodings can be used. For example, base64 may be preferable - for developing across multiple Azure Storage libraries in different languages. - See the :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 - and no decoding methods as well as binary equivalents. - :ivar object key_encryption_key: - The key-encryption-key optionally provided by the user. If provided, will be used to - encrypt/decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR the resolver must be provided. - If both are provided, the resolver will take precedence. 
- Must implement the following methods for APIs requiring encryption: - wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - Must implement the following methods for APIs requiring decryption: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :ivar function key_resolver_function(kid): - A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR - the resolver must be provided. If both are provided, the resolver will take precedence. - It uses the kid string to return a key-encryption-key implementing the interface defined above. - :ivar bool require_encryption: - A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and - successfully read from the queue are/were encrypted while on the server. If this flag is set, all required - parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver. - ''' - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. 
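A sketch of how the constructor above pairs with the SAS helpers defined below: generate a queue-scoped token from the account key, then hand it to a second, key-less client (credentials and queue name are placeholders):

from datetime import datetime, timedelta

from azure.multiapi.storage.v2016_05_31.queue import QueueService
from azure.multiapi.storage.v2016_05_31.queue.models import QueuePermissions

service = QueueService(account_name='account', account_key='key')

# Allow reading metadata/peeking plus get/delete of messages for one hour.
sas_token = service.generate_queue_shared_access_signature(
    'taskqueue',
    permission=QueuePermissions.READ | QueuePermissions.PROCESS,
    expiry=datetime.utcnow() + timedelta(hours=1),
)

sas_service = QueueService(account_name='account', sas_token=sas_token)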
- ''' - service_params = _ServiceParameters.get_service_parameters( - 'queue', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(QueueService, self).__init__(service_params) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - self.encode_function = QueueMessageFormat.text_xmlencode - self.decode_function = QueueMessageFormat.text_xmldecode - self.key_encryption_key = None - self.key_resolver_function = None - self.require_encryption = False - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the queue service. - Use the returned signature with the sas_token parameter of QueueService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.QUEUE, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_queue_shared_access_signature(self, queue_name, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, protocol=None,): - ''' - Generates a shared access signature for the queue. - Use the returned signature with the sas_token parameter of QueueService. - - :param str queue_name: - The name of the queue to create a SAS token for. - :param QueuePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_queue_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('queue_name', queue_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_queue( - queue_name, - permission=permission, - expiry=expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - ) - - def get_queue_service_stats(self, timeout=None): - ''' - Retrieves statistics related to replication for the Queue service. It is - only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. 
- The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The queue service stats. - :rtype: :class:`~azure.storage.models.ServiceStats` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(primary=False, secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_stats) - - def get_queue_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's Queue service, including - logging, analytics and CORS rules. - - :param int timeout: - The server timeout, expressed in seconds. - :return: The queue service properties. - :rtype: :class:`~azure.storage.models.ServiceProperties` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def set_queue_service_properties(self, logging=None, hour_metrics=None, - minute_metrics=None, cors=None, timeout=None): - ''' - Sets the properties of a storage account's Queue service, including - Azure Storage Analytics. If an element (ex Logging) is left as None, the - existing settings on the service for that functionality are preserved. - For more information on Azure Storage Analytics, see - https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx. - - :param Logging logging: - The logging settings provide request logs. - :param Metrics hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for queuess. - :param Metrics minute_metrics: - The minute metrics settings provide request statistics - for each minute for queues. - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. For detailed information - about CORS rules and evaluation logic, see - https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx. - :type cors: list of :class:`~azure.storage.models.CorsRule` - :param int timeout: - The server timeout, expressed in seconds. 
- ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors)) - self._perform_request(request) - - def list_queues(self, prefix=None, num_results=None, include_metadata=False, - marker=None, timeout=None): - ''' - Returns a generator to list the queues. The generator will lazily follow - the continuation tokens returned by the service and stop when all queues - have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - queues, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only queues with names that begin - with the specified prefix. - :param int num_results: - The maximum number of queues to return. - :param bool include_metadata: - Specifies that container metadata be returned in the response. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The server timeout, expressed in seconds. This function may make multiple - calls to the service in which case the timeout value specified will be - applied to each individual call. - ''' - include = 'metadata' if include_metadata else None - operation_context = _OperationContext(location_lock=True) - kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include, - 'marker': marker, 'timeout': timeout, '_context': operation_context} - resp = self._list_queues(**kwargs) - - return ListGenerator(resp, self._list_queues, (), kwargs) - - def _list_queues(self, prefix=None, marker=None, max_results=None, - include=None, timeout=None, _context=None): - ''' - Returns a list of queues under the specified account. Makes a single list - request to the service. Used internally by the list_queues method. - - :param str prefix: - Filters the results to return only queues with names that begin - with the specified prefix. - :param str marker: - A token which identifies the portion of the query to be - returned with the next query operation. The operation returns a - next_marker element within the response body if the list returned - was not complete. This value may then be used as a query parameter - in a subsequent call to request the next portion of the list of - queues. The marker value is opaque to the client. - :param int max_results: - The maximum number of queues to return. A single list request may - return up to 1000 queues and potentially a continuation token which - should be followed to get additional resutls. - :param str include: - Include this parameter to specify that the container's - metadata be returned as part of the response body. - :param int timeout: - The server timeout, expressed in seconds. 
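# Illustrative sketch, not part of this diff (import path assumed from the vendored layout
# being removed): list_queues returns a lazy generator that follows continuation tokens,
# so callers simply iterate it.
from azure.multiapi.storage.v2016_05_31.queue import QueueService

service = QueueService(account_name='mystorageaccount', account_key='<key>')

for queue in service.list_queues(prefix='task-', include_metadata=True):
    print(queue.name, queue.metadata)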
- ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queues, operation_context=_context) - - def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None): - ''' - Creates a queue under the given account. - - :param str queue_name: - The name of the queue to create. A queue name must be from 3 through - 63 characters long and may only contain lowercase letters, numbers, - and the dash (-) character. The first and last letters in the queue - must be alphanumeric. The dash (-) character cannot be the first or - last character. Consecutive dash characters are not permitted in the - queue name. - :param metadata: - A dict containing name-value pairs to associate with the queue as - metadata. Note that metadata names preserve the case with which they - were created, but are case-insensitive when set or read. - :type metadata: a dict mapping str to str - :param bool fail_on_exist: - Specifies whether to throw an exception if the queue already exists. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A boolean indicating whether the queue was created. If fail_on_exist - was set to True, this will throw instead of returning false. - :rtype: bool - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = {'timeout': _int_to_str(timeout)} - _add_metadata_headers(metadata, request) - - def _return_request(request): - return request - - if not fail_on_exist: - try: - response = self._perform_request(request, parser=_return_request) - if response.status == _HTTP_RESPONSE_NO_CONTENT: - return False - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - response = self._perform_request(request, parser=_return_request) - if response.status == _HTTP_RESPONSE_NO_CONTENT: - raise AzureConflictHttpError( - _ERROR_CONFLICT.format(response.message), response.status) - return True - - def delete_queue(self, queue_name, fail_not_exist=False, timeout=None): - ''' - Deletes the specified queue and any messages it contains. - - When a queue is successfully deleted, it is immediately marked for deletion - and is no longer accessible to clients. The queue is later removed from - the Queue service during garbage collection. - - Note that deleting a queue is likely to take at least 40 seconds to complete. - If an operation is attempted against the queue while it was being deleted, - an :class:`AzureConflictHttpError` will be thrown. - - :param str queue_name: - The name of the queue to delete. - :param bool fail_not_exist: - Specifies whether to throw an exception if the queue doesn't exist. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A boolean indicating whether the queue was deleted. If fail_not_exist - was set to True, this will throw instead of returning false. 
- :rtype: bool - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = {'timeout': _int_to_str(timeout)} - if not fail_not_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_queue_metadata(self, queue_name, timeout=None): - ''' - Retrieves user-defined metadata and queue properties on the specified - queue. Metadata is associated with the queue as name-value pairs. - - :param str queue_name: - The name of an existing queue. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A dictionary representing the queue metadata with an - approximate_message_count int property on the dict estimating the - number of messages in the queue. - :rtype: a dict mapping str to str - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_metadata_and_message_count) - - def set_queue_metadata(self, queue_name, metadata=None, timeout=None): - ''' - Sets user-defined metadata on the specified queue. Metadata is - associated with the queue as name-value pairs. - - :param str queue_name: - The name of an existing queue. - :param dict metadata: - A dict containing name-value pairs to associate with the - queue as metadata. - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def exists(self, queue_name, timeout=None): - ''' - Returns a boolean indicating whether the queue exists. - - :param str queue_name: - The name of queue to check for existence. - :param int timeout: - The server timeout, expressed in seconds. - :return: A boolean indicating whether the queue exists. - :rtype: bool - ''' - try: - self.get_queue_metadata(queue_name, timeout=timeout) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def get_queue_acl(self, queue_name, timeout=None): - ''' - Returns details about any stored access policies specified on the - queue that may be used with Shared Access Signatures. - - :param str queue_name: - The name of an existing queue. - :param int timeout: - The server timeout, expressed in seconds. - :return: A dictionary of access policies associated with the queue. 
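# Illustrative sketch, not part of this diff (import path assumed): create_queue is
# effectively idempotent when fail_on_exist is left False, and queue metadata round-trips
# as a plain dict with an approximate_message_count attached by the parser.
from azure.multiapi.storage.v2016_05_31.queue import QueueService

service = QueueService(account_name='mystorageaccount', account_key='<key>')

created = service.create_queue('tasks', metadata={'team': 'billing'}, fail_on_exist=False)
print('created' if created else 'already existed')

if service.exists('tasks'):
    meta = service.get_queue_metadata('tasks')
    print(meta.approximate_message_count, dict(meta))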
- :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy` - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name) - request.query = { - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_signed_identifiers) - - def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None): - ''' - Sets stored access policies for the queue that may be used with Shared - Access Signatures. - - When you set permissions for a queue, the existing permissions are replaced. - To update the queue's permissions, call :func:`~get_queue_acl` to fetch - all access policies associated with the queue, modify the access policy - that you wish to change, and then call this function with the complete - set of data to perform the update. - - When you establish a stored access policy on a queue, it may take up to - 30 seconds to take effect. During this interval, a shared access signature - that is associated with the stored access policy will throw an - :class:`AzureHttpError` until the access policy becomes active. - - :param str queue_name: - The name of an existing queue. - :param signed_identifiers: - A dictionary of access policies to associate with the queue. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy` - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = { - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - self._perform_request(request) - - def put_message(self, queue_name, content, visibility_timeout=None, - time_to_live=None, timeout=None): - ''' - Adds a new message to the back of the message queue. - - The visibility timeout specifies the time that the message will be - invisible. After the timeout expires, the message will become visible. - If a visibility timeout is not specified, the default value of 0 is used. - - The message time-to-live specifies how long a message will remain in the - queue. The message will be deleted from the queue when the time-to-live - period expires. - - If the key-encryption-key field is set on the local service object, this method will - encrypt the content before uploading. - - :param str queue_name: - The name of the queue to put the message into. - :param obj content: - Message content. Allowed type is determined by the encode_function - set on the service. Default is str. The encoded message can be up to - 64KB in size. - :param int visibility_timeout: - If not specified, the default value is 0. Specifies the - new visibility timeout value, in seconds, relative to server time. - The value must be larger than or equal to 0, and cannot be - larger than 7 days. The visibility timeout of a message cannot be - set to a value later than the expiry time. visibility_timeout - should be set to a value smaller than the time-to-live value. 
- :param int time_to_live: - Specifies the time-to-live interval for the message, in - seconds. The maximum time-to-live allowed is 7 days. If this - parameter is omitted, the default time-to-live is 7 days. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A :class:`~azure.storage.queue.models.QueueMessage` object. - This object is also populated with the content although it is not - returned from the service. - :rtype: :class:`~azure.storage.queue.models.QueueMessage` - ''' - - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - _validate_not_none('queue_name', queue_name) - _validate_not_none('content', content) - request = HTTPRequest() - request.method = 'POST' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = { - 'visibilitytimeout': _to_str(visibility_timeout), - 'messagettl': _to_str(time_to_live), - 'timeout': _int_to_str(timeout) - } - - request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function, - self.key_encryption_key)) - - message_list = self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, False, - None, None, content]) - return message_list[0] - - def get_messages(self, queue_name, num_messages=None, - visibility_timeout=None, timeout=None): - ''' - Retrieves one or more messages from the front of the queue. - - When a message is retrieved from the queue, the response includes the message - content and a pop_receipt value, which is required to delete the message. - The message is not automatically deleted from the queue, but after it has - been retrieved, it is not visible to other clients for the time interval - specified by the visibility_timeout parameter. - - If the key-encryption-key or resolver field is set on the local service object, the messages will be - decrypted before being returned. - - :param str queue_name: - The name of the queue to get messages from. - :param int num_messages: - A nonzero integer value that specifies the number of - messages to retrieve from the queue, up to a maximum of 32. If - fewer are visible, the visible messages are returned. By default, - a single message is retrieved from the queue with this operation. - :param int visibility_timeout: - Specifies the new visibility timeout value, in seconds, relative - to server time. The new value must be larger than or equal to 1 - second, and cannot be larger than 7 days. The visibility timeout of - a message can be set to a value later than the expiry time. - :param int timeout: - The server timeout, expressed in seconds. - :return: A :class:`~azure.storage.queue.models.QueueMessage` object representing the information passed. 
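# Illustrative end-to-end flow, not part of this diff (import path assumed): enqueue a
# message, lease a batch via the visibility timeout, then delete each message using its
# pop receipt once processing succeeds.
from azure.multiapi.storage.v2016_05_31.queue import QueueService

service = QueueService(account_name='mystorageaccount', account_key='<key>')
service.put_message('tasks', u'process-order-42', time_to_live=3600)

# Hide retrieved messages from other consumers for 5 minutes while we work on them.
for msg in service.get_messages('tasks', num_messages=16, visibility_timeout=300):
    # ... process msg.content here ...
    service.delete_message('tasks', msg.id, msg.pop_receipt)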
- :rtype: list of :class:`~azure.storage.queue.models.QueueMessage` - ''' - _validate_decryption_required(self.require_encryption, self.key_encryption_key, - self.key_resolver_function) - - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = { - 'numofmessages': _to_str(num_messages), - 'visibilitytimeout': _to_str(visibility_timeout), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, self.require_encryption, - self.key_encryption_key, self.key_resolver_function]) - - def peek_messages(self, queue_name, num_messages=None, timeout=None): - ''' - Retrieves one or more messages from the front of the queue, but does - not alter the visibility of the message. - - Only messages that are visible may be retrieved. When a message is retrieved - for the first time with a call to get_messages, its dequeue_count property - is set to 1. If it is not deleted and is subsequently retrieved again, the - dequeue_count property is incremented. The client may use this value to - determine how many times a message has been retrieved. Note that a call - to peek_messages does not increment the value of DequeueCount, but returns - this value for the client to read. - - If the key-encryption-key or resolver field is set on the local service object, the messages will be - decrypted before being returned. - - :param str queue_name: - The name of the queue to peek messages from. - :param int num_messages: - A nonzero integer value that specifies the number of - messages to peek from the queue, up to a maximum of 32. By default, - a single message is peeked from the queue with this operation. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that - time_next_visible and pop_receipt will not be populated as peek does - not pop the message and can only retrieve already visible messages. - :rtype: list of :class:`~azure.storage.queue.models.QueueMessage` - ''' - - _validate_decryption_required(self.require_encryption, self.key_encryption_key, - self.key_resolver_function) - - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name, True) - request.query = { - 'peekonly': 'true', - 'numofmessages': _to_str(num_messages), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, self.require_encryption, - self.key_encryption_key, self.key_resolver_function]) - - def delete_message(self, queue_name, message_id, pop_receipt, timeout=None): - ''' - Deletes the specified message. - - Normally after a client retrieves a message with the get_messages operation, - the client is expected to process and delete the message. To delete the - message, you must have two items of data: id and pop_receipt. The - id is returned from the previous get_messages operation. The - pop_receipt is returned from the most recent :func:`~get_messages` or - :func:`~update_message` operation. 
In order for the delete_message operation - to succeed, the pop_receipt specified on the request must match the - pop_receipt returned from the :func:`~get_messages` or :func:`~update_message` - operation. - - :param str queue_name: - The name of the queue from which to delete the message. - :param str message_id: - The message id identifying the message to delete. - :param str pop_receipt: - A valid pop receipt value returned from an earlier call - to the :func:`~get_messages` or :func:`~update_message`. - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - _validate_not_none('message_id', message_id) - _validate_not_none('pop_receipt', pop_receipt) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True, message_id) - request.query = { - 'popreceipt': _to_str(pop_receipt), - 'timeout': _int_to_str(timeout) - } - self._perform_request(request) - - def clear_messages(self, queue_name, timeout=None): - ''' - Deletes all messages from the specified queue. - - :param str queue_name: - The name of the queue whose messages to clear. - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = {'timeout': _int_to_str(timeout)} - self._perform_request(request) - - def update_message(self, queue_name, message_id, pop_receipt, visibility_timeout, - content=None, timeout=None): - ''' - Updates the visibility timeout of a message. You can also use this - operation to update the contents of a message. - - This operation can be used to continually extend the invisibility of a - queue message. This functionality can be useful if you want a worker role - to "lease" a queue message. For example, if a worker role calls get_messages - and recognizes that it needs more time to process a message, it can - continually extend the message's invisibility until it is processed. If - the worker role were to fail during processing, eventually the message - would become visible again and another worker role could process it. - - If the key-encryption-key field is set on the local service object, this method will - encrypt the content before uploading. - - :param str queue_name: - The name of the queue containing the message to update. - :param str message_id: - The message id identifying the message to update. - :param str pop_receipt: - A valid pop receipt value returned from an earlier call - to the :func:`~get_messages` or :func:`~update_message` operation. - :param int visibility_timeout: - Specifies the new visibility timeout value, in seconds, - relative to server time. The new value must be larger than or equal - to 0, and cannot be larger than 7 days. The visibility timeout of a - message cannot be set to a value later than the expiry time. A - message can be updated until it has been deleted or has expired. - :param obj content: - Message content. Allowed type is determined by the encode_function - set on the service. Default is str. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A list of :class:`~azure.storage.queue.models.QueueMessage` objects. For convenience, - this object is also populated with the content, although it is not returned by the service. 
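# Illustrative sketch of the "lease renewal" pattern described above, not part of this
# diff (import path assumed; work_is_done and do_some_work are hypothetical helpers):
# keep extending a message's invisibility while a long-running worker owns it, using the
# fresh pop receipt handed back by each update.
from azure.multiapi.storage.v2016_05_31.queue import QueueService

service = QueueService(account_name='mystorageaccount', account_key='<key>')

messages = service.get_messages('tasks', visibility_timeout=60)
if messages:
    msg = messages[0]
    pop_receipt = msg.pop_receipt
    while not work_is_done():          # hypothetical helper
        do_some_work(msg.content)      # hypothetical helper
        # Each update returns the new pop receipt, which must be used for the next call.
        updated = service.update_message('tasks', msg.id, pop_receipt, visibility_timeout=60)
        pop_receipt = updated.pop_receipt
    service.delete_message('tasks', msg.id, pop_receipt)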
- :rtype: list of :class:`~azure.storage.queue.models.QueueMessage` - ''' - - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - _validate_not_none('queue_name', queue_name) - _validate_not_none('message_id', message_id) - _validate_not_none('pop_receipt', pop_receipt) - _validate_not_none('visibility_timeout', visibility_timeout) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True, message_id) - request.query = { - 'popreceipt': _to_str(pop_receipt), - 'visibilitytimeout': _int_to_str(visibility_timeout), - 'timeout': _int_to_str(timeout) - } - - if content is not None: - request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function, - self.key_encryption_key)) - - return self._perform_request(request, _parse_queue_message_from_headers) diff --git a/azure/multiapi/storage/v2016_05_31/retry.py b/azure/multiapi/storage/v2016_05_31/retry.py deleted file mode 100644 index 7c334f2..0000000 --- a/azure/multiapi/storage/v2016_05_31/retry.py +++ /dev/null @@ -1,259 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from math import pow -from abc import ABCMeta - -from .models import LocationMode - -class _Retry(object): - ''' - The base class for Exponential and Linear retries containing shared code. - ''' - __metaclass__ = ABCMeta - - def __init__(self, max_attempts, retry_to_secondary): - ''' - Constructs a base retry object. - - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - ''' - self.max_attempts = max_attempts - self.retry_to_secondary = retry_to_secondary - - def _should_retry(self, context): - ''' - A function which determines whether or not to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - A boolean indicating whether or not to retry the request. - :rtype: bool - ''' - # If max attempts are reached, do not retry. - if (context.count >= self.max_attempts): - return False - - status = None - if context.response and context.response.status: - status = context.response.status - - if status == None: - ''' - If status is None, retry as this request triggered an exception. For - example, network issues would trigger this. - ''' - return True - elif status >= 200 and status < 300: - ''' - This method is called after a successful response, meaning we failed - during the response body download or parsing. So, success codes should - be retried. 
- ''' - return True - elif status >= 300 and status < 500: - ''' - An exception occured, but in most cases it was expected. Examples could - include a 309 Conflict or 412 Precondition Failed. - ''' - if status == 404 and context.location_mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - elif status >= 500: - ''' - Response codes above 500 with the exception of 501 Not Implemented and - 505 Version Not Supported indicate a server issue and should be retried. - ''' - if status == 501 or status == 505: - return False - return True - else: - # If something else happened, it's unexpected. Retry. - return True - - def _set_next_host_location(self, context): - ''' - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. - ''' - if len(context.request.host_locations) > 1: - # If there's more than one possible location, retry to the alternative - if context.location_mode == LocationMode.PRIMARY: - context.location_mode = LocationMode.SECONDARY - else: - context.location_mode = LocationMode.PRIMARY - - context.request.host = context.request.host_locations.get(context.location_mode) - - def _retry(self, context, backoff): - ''' - A function which determines whether and how to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :param function() backoff: - A function which returns the backoff time if a retry is to be performed. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - # If the context does not contain a count parameter, this request has not - # been retried yet. Add the count parameter to track the number of retries. - if not hasattr(context, 'count'): - context.count = 0 - - # Determine whether to retry, and if so increment the count, modify the - # request as desired, and return the backoff. - if self._should_retry(context): - context.count += 1 - - # If retry to secondary is enabled, attempt to change the host if the - # request allows it - if self.retry_to_secondary: - self._set_next_host_location(context) - - return backoff(context) - - return None - - -class ExponentialRetry(_Retry): - ''' - Exponential retry. - ''' - - def __init__(self, initial_backoff=15, increment_power=3, max_attempts=3, - retry_to_secondary=False): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_power: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. 
This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - ''' - self.initial_backoff = initial_backoff - self.increment_power = increment_power - super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary) - - ''' - A function which determines whether and how to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - def retry(self, context): - return self._retry(context, self._backoff) - - ''' - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - def _backoff(self, context): - return self.initial_backoff + pow(self.increment_power, context.count) - -class LinearRetry(_Retry): - ''' - Linear retry. - ''' - - def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False): - ''' - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - ''' - self.backoff = backoff - self.max_attempts = max_attempts - super(LinearRetry, self).__init__(max_attempts, retry_to_secondary) - - ''' - A function which determines whether and how to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - def retry(self, context): - return self._retry(context, self._backoff) - - ''' - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - def _backoff(self, context): - return self.backoff - -def no_retry(context): - ''' - Specifies never to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. - :return: - Always returns None to indicate never to retry. - :rtype: None - ''' - return None \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/sharedaccesssignature.py b/azure/multiapi/storage/v2016_05_31/sharedaccesssignature.py deleted file mode 100644 index b3f3356..0000000 --- a/azure/multiapi/storage/v2016_05_31/sharedaccesssignature.py +++ /dev/null @@ -1,671 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
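# Illustrative sketch of wiring the retry policies defined above onto a service object,
# not part of this diff (import paths assumed from the vendored layout being removed).
# With ExponentialRetry, the wait grows as initial_backoff + increment_power ** attempt.
from azure.multiapi.storage.v2016_05_31.queue import QueueService
from azure.multiapi.storage.v2016_05_31.retry import ExponentialRetry, LinearRetry, no_retry

service = QueueService(account_name='mystorageaccount', account_key='<key>')

# Retry up to 5 times, allowing fallback to the secondary endpoint for RA-GRS accounts.
service.retry = ExponentialRetry(initial_backoff=15, increment_power=3,
                                 max_attempts=5, retry_to_secondary=True).retry

# Alternatively: a fixed 10-second wait between attempts, or no retries at all.
service.retry = LinearRetry(backoff=10).retry
service.retry = no_retry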
-# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from datetime import date - -from ._common_conversion import ( - _sign_string, - _to_str, -) -from ._serialization import ( - url_quote, - _to_utc_datetime, -) -from ._constants import X_MS_VERSION - -class SharedAccessSignature(object): - ''' - Provides a factory for creating blob, queue, table, and file shares access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to genenerate the shares access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - - def generate_table(self, table_name, permission=None, - expiry=None, start=None, id=None, - ip=None, protocol=None, - start_pk=None, start_rk=None, - end_pk=None, end_rk=None): - ''' - Generates a shared access signature for the table. - Use the returned signature with the sas_token parameter of TableService. - - :param str table_name: - Name of table. - :param TablePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. 
See :class:`~azure.storage.models.Protocol` for possible values. - :param str start_pk: - The minimum partition key accessible with this shared access - signature. startpk must accompany startrk. Key values are inclusive. - If omitted, there is no lower bound on the table entities that can - be accessed. - :param str start_rk: - The minimum row key accessible with this shared access signature. - startpk must accompany startrk. Key values are inclusive. If - omitted, there is no lower bound on the table entities that can be - accessed. - :param str end_pk: - The maximum partition key accessible with this shared access - signature. endpk must accompany endrk. Key values are inclusive. If - omitted, there is no upper bound on the table entities that can be - accessed. - :param str end_rk: - The maximum row key accessible with this shared access signature. - endpk must accompany endrk. Key values are inclusive. If omitted, - there is no upper bound on the table entities that can be accessed. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol) - sas.add_id(id) - sas.add_table_access_ranges(table_name, start_pk, start_rk, end_pk, end_rk) - - # Table names must be signed lower case - resource_path = table_name.lower() - sas.add_resource_signature(self.account_name, self.account_key, 'table', resource_path) - - return sas.get_token() - - def generate_queue(self, queue_name, permission=None, - expiry=None, start=None, id=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the queue. - Use the returned signature with the sas_token parameter of QueueService. - - :param str queue_name: - Name of queue. - :param QueuePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, add, update, process. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. 
See :class:`~azure.storage.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol) - sas.add_id(id) - sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name) - - return sas.get_token() - - def generate_blob(self, container_name, blob_name, permission=None, - expiry=None, start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the blob. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param BlobPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - resource_path = container_name + '/' + blob_name - - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol) - sas.add_id(id) - sas.add_resource('b') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
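# Illustrative sketch, not part of this diff (import paths assumed): the
# SharedAccessSignature factory minting a read-only blob token with an overridden
# Content-Disposition response header, then appending it to the blob URL.
from datetime import datetime, timedelta

from azure.multiapi.storage.v2016_05_31.sharedaccesssignature import SharedAccessSignature
from azure.multiapi.storage.v2016_05_31.blob.models import BlobPermissions

sas = SharedAccessSignature('mystorageaccount', '<key>')
token = sas.generate_blob(
    'reports', 'summary.pdf',
    permission=BlobPermissions.READ,
    expiry=datetime.utcnow() + timedelta(hours=2),
    content_disposition='attachment; filename=summary.pdf',
)
url = 'https://mystorageaccount.blob.core.windows.net/reports/summary.pdf?' + token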
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol) - sas.add_id(id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name) - - return sas.get_token() - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param FilePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. 
- :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _to_str(directory_name) - resource_path += '/' + _to_str(file_name) - - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol) - sas.add_id(id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param SharePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. 
- :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol) - sas.add_id(id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name) - - return sas.get_token() - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param Services services: - Specifies the services accessible with the account SAS. You can - combine values to provide access to more than one service. - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - -class _QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - TABLE_NAME = 'tn' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - -class _SharedAccessHelper(): - - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _to_str(val) - - def add_base(self, permission, expiry, start, ip, protocol): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(_QueryStringConstants.SIGNED_START, start) - self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(_QueryStringConstants.SIGNED_IP, ip) - self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(_QueryStringConstants.SIGNED_VERSION, X_MS_VERSION) - - def add_resource(self, resource): - self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, id): - self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id) - - def add_account(self, services, resource_types): - self._add_query(_QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_table_access_ranges(self, table_name, start_pk, start_rk, - end_pk, end_rk): - self._add_query(_QueryStringConstants.TABLE_NAME, table_name) - self._add_query(_QueryStringConstants.START_PK, start_pk) - self._add_query(_QueryStringConstants.START_RK, start_rk) - self._add_query(_QueryStringConstants.END_PK, end_pk) - self._add_query(_QueryStringConstants.END_RK, end_rk) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_resource_signature(self, account_name, account_key, service, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/' + service + '/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(_QueryStringConstants.SIGNED_START) + - get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(_QueryStringConstants.SIGNED_IP) + - get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) - - if service == 'blob' or service == 'file': - string_to_sign += \ - (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE)) - - if service == 'table': - string_to_sign += \ - (get_value_to_append(_QueryStringConstants.START_PK) + - get_value_to_append(_QueryStringConstants.START_RK) + - get_value_to_append(_QueryStringConstants.END_PK) + - get_value_to_append(_QueryStringConstants.END_RK)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, - _sign_string(account_key, string_to_sign)) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(_QueryStringConstants.SIGNED_START) + - get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(_QueryStringConstants.SIGNED_IP) + - get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) - - self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, - _sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/storageclient.py b/azure/multiapi/storage/v2016_05_31/storageclient.py deleted file mode 100644 index e905186..0000000 --- a/azure/multiapi/storage/v2016_05_31/storageclient.py +++ /dev/null @@ -1,285 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#-------------------------------------------------------------------------- -import os -import sys -import copy -import requests -from time import sleep -from abc import ABCMeta - -from azure.common import ( - AzureException, -) -from .models import ( - RetryContext, - LocationMode, - _OperationContext, -) -from .retry import ExponentialRetry -from ._constants import ( - DEFAULT_SOCKET_TIMEOUT -) -from ._http import HTTPError -from ._http.httpclient import _HTTPClient -from ._serialization import ( - _update_request, - _add_date_header, -) -from ._error import ( - _ERROR_STORAGE_MISSING_INFO, - _ERROR_DECRYPTION_FAILURE, - _http_error_handler, -) - -class StorageClient(object): - - ''' - This is the base class for service objects. Service objects are used to do - all requests to Storage. This class cannot be instantiated directly. - - :ivar str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :ivar str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :ivar str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :ivar str primary_endpoint: - The endpoint to send storage requests to. - :ivar str secondary_endpoint: - The secondary endpoint to read storage data from. This will only be a - valid endpoint if the storage account used is RA-GRS and thus allows - reading from secondary. - :ivar function(context) retry: - A function which determines whether to retry. Takes as a parameter a - :class:`~azure.storage.models.RetryContext` object. Returns the number - of seconds to wait before retrying the request, or None to indicate not - to retry. - :ivar LocationMode location_mode: - The host location to use to make requests. Defaults to LocationMode.PRIMARY. - Note that this setting only applies to RA-GRS accounts as other account - types do not allow reading from secondary. If the location_mode is set to - LocationMode.SECONDARY, read requests will be sent to the secondary endpoint. - Write requests will continue to be sent to primary. - :ivar str protocol: - The protocol to use for requests. Defaults to https. - :ivar requests.Session request_session: - The session object to use for http requests. - :ivar function(request) request_callback: - A function called immediately before each request is sent. This function - takes as a parameter the request object and returns nothing. It may be - used to added custom headers or log request data. - :ivar function() response_callback: - A function called immediately after each response is received. This - function takes as a parameter the response object and returns nothing. - It may be used to log response data. - :ivar function() retry_callback: - A function called immediately after retry evaluation is performed. This - function takes as a parameter the retry context object and returns nothing. - It may be used to detect retries and log context information. - ''' - - __metaclass__ = ABCMeta - - def __init__(self, connection_params): - ''' - :param obj connection_params: The parameters to use to construct the client. 
- ''' - self.account_name = connection_params.account_name - self.account_key = connection_params.account_key - self.sas_token = connection_params.sas_token - - self.primary_endpoint = connection_params.primary_endpoint - self.secondary_endpoint = connection_params.secondary_endpoint - - protocol = connection_params.protocol - request_session = connection_params.request_session or requests.Session() - socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT - self._httpclient = _HTTPClient( - protocol=protocol, - session=request_session, - timeout=socket_timeout, - ) - - self.retry = ExponentialRetry().retry - self.location_mode = LocationMode.PRIMARY - - self.request_callback = None - self.response_callback = None - self.retry_callback = None - - @property - def socket_timeout(self): - return self._httpclient.timeout - - @socket_timeout.setter - def socket_timeout(self, value): - self._httpclient.timeout = value - - @property - def protocol(self): - return self._httpclient.protocol - - @protocol.setter - def protocol(self, value): - self._httpclient.protocol = value - - @property - def request_session(self): - return self._httpclient.session - - @request_session.setter - def request_session(self, value): - self._httpclient.session = value - - def set_proxy(self, host, port, user=None, password=None): - ''' - Sets the proxy server host and port for the HTTP CONNECT Tunnelling. - - :param str host: Address of the proxy. Ex: '192.168.0.100' - :param int port: Port of the proxy. Ex: 6000 - :param str user: User for proxy authorization. - :param str password: Password for proxy authorization. - ''' - self._httpclient.set_proxy(host, port, user, password) - - def _get_host_locations(self, primary=True, secondary=False): - locations = {} - if primary: - locations[LocationMode.PRIMARY] = self.primary_endpoint - if secondary: - locations[LocationMode.SECONDARY] = self.secondary_endpoint - return locations - - def _apply_host(self, request, operation_context, retry_context): - if operation_context.location_lock and operation_context.host_location: - # If this is a location locked operation and the location is set, - # override the request location and host_location. - request.host_locations = operation_context.host_location - request.host = list(operation_context.host_location.values())[0] - retry_context.location_mode = list(operation_context.host_location.keys())[0] - elif len(request.host_locations) == 1: - # If only one location is allowed, use that location. - request.host = list(request.host_locations.values())[0] - retry_context.location_mode = list(request.host_locations.keys())[0] - else: - # If multiple locations are possible, choose based on the location mode. - request.host = request.host_locations.get(self.location_mode) - retry_context.location_mode = self.location_mode - - def _perform_request(self, request, parser=None, parser_args=None, operation_context=None): - ''' - Sends the request and return response. 
Catches HTTPError and hands it - to error handler - ''' - operation_context = operation_context or _OperationContext() - retry_context = RetryContext() - - # Apply the appropriate host based on the location mode - self._apply_host(request, operation_context, retry_context) - - # Apply common settings to the request - _update_request(request) - - while(True): - try: - try: - # Execute the request callback - if self.request_callback: - self.request_callback(request) - - # Add date and auth after the callback so date doesn't get too old and - # authentication is still correct if signed headers are added in the request - # callback. This also ensures retry policies with long back offs - # will work as it resets the time sensitive headers. - _add_date_header(request) - self.authentication.sign_request(request) - - # Set the request context - retry_context.request = request - - # Perform the request - response = self._httpclient.perform_request(request) - - # Execute the response callback - if self.response_callback: - self.response_callback(response) - - # Set the response context - retry_context.response = response - - # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException - if response.status >= 300: - # This exception will be caught by the general error handler - # and raised as an azure http exception - _http_error_handler(HTTPError(response.status, response.message, response.headers, response.body)) - - # Parse the response - if parser: - if parser_args: - args = [response] - args.extend(parser_args) - return parser(*args) - else: - return parser(response) - else: - return - except AzureException as ex: - raise ex - except Exception as ex: - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - raise AzureException(ex.args[0]) - else: - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - msg = "" - if len(ex.args) > 0: - msg = ex.args[0] - raise AzureException('{}: {}'.format(ex.__class__.__name__, msg)) - - - except AzureException as ex: - # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc) - # will not be resolved with retries. - if str(ex) == _ERROR_DECRYPTION_FAILURE: - raise ex - # Determine whether a retry should be performed and if so, how - # long to wait before performing retry. - retry_interval = self.retry(retry_context) - if retry_interval is not None: - # Execute the callback - if self.retry_callback: - self.retry_callback(retry_context) - - # Sleep for the desired retry interval - sleep(retry_interval) - else: - raise ex - finally: - # If this is a location locked operation and the location is not set, - # this is the first request of that operation. Set the location to - # be used for subsequent requests in the operation. - if operation_context.location_lock and not operation_context.host_location: - operation_context.host_location = {retry_context.location_mode: request.host} \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/table/__init__.py b/azure/multiapi/storage/v2016_05_31/table/__init__.py deleted file mode 100644 index fa37c55..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from .models import ( - Entity, - EntityProperty, - Table, - TablePermissions, - TablePayloadFormat, - EdmType, - AzureBatchOperationError, - AzureBatchValidationError, -) -from .tablebatch import TableBatch -from .tableservice import TableService diff --git a/azure/multiapi/storage/v2016_05_31/table/_deserialization.py b/azure/multiapi/storage/v2016_05_31/table/_deserialization.py deleted file mode 100644 index e5883b4..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/_deserialization.py +++ /dev/null @@ -1,348 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -import sys - -from dateutil import parser -if sys.version_info < (3,): - from urllib2 import quote as url_quote -else: - from urllib.parse import quote as url_quote -from json import ( - loads, -) -from .._http import HTTPResponse -from azure.common import ( - AzureException, -) -from .._common_conversion import ( - _decode_base64_to_bytes, -) -from .._error import ( - _ERROR_DECRYPTION_FAILURE, - _validate_decryption_required, -) -from ._error import ( - _ERROR_TYPE_NOT_SUPPORTED, - _ERROR_INVALID_PROPERTY_RESOLVER, -) -from .models import ( - Entity, - EntityProperty, - Table, - EdmType, - AzureBatchOperationError, -) -from ..models import ( - _list, - _HeaderDict, -) -from ._encryption import ( - _decrypt_entity, - _extract_encryption_metadata, -) - -def _get_continuation_from_response_headers(response): - marker = {} - for name, value in response.headers.items(): - if name.startswith('x-ms-continuation'): - marker[name[len('x-ms-continuation') + 1:]] = value - return marker - -# Tables of conversions to and from entity types. We support specific -# datatypes, and beyond that the user can use an EntityProperty to get -# custom data type support. 
- -def _from_entity_binary(value): - return EntityProperty(EdmType.BINARY, _decode_base64_to_bytes(value)) - - -def _from_entity_int32(value): - return EntityProperty(EdmType.INT32, int(value)) - - -def _from_entity_datetime(value): - # Note that Azure always returns UTC datetime, and dateutil parser - # will set the tzinfo on the date it returns - return parser.parse(value) - - -_EDM_TYPES = [EdmType.BINARY, EdmType.INT64, EdmType.GUID, EdmType.DATETIME, - EdmType.STRING, EdmType.INT32, EdmType.DOUBLE, EdmType.BOOLEAN] - - -_ENTITY_TO_PYTHON_CONVERSIONS = { - EdmType.BINARY: _from_entity_binary, - EdmType.INT32: _from_entity_int32, - EdmType.INT64: int, - EdmType.DOUBLE: float, - EdmType.DATETIME: _from_entity_datetime, -} - -def _convert_json_response_to_entity(response, property_resolver, require_encryption, - key_encryption_key, key_resolver): - ''' - :param bool require_encryption: - If set, will enforce that the retrieved entity is encrypted and decrypt it. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the - string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :param function key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing - the interface defined above. - ''' - if response is None or response.body is None: - return None - - root = loads(response.body.decode('utf-8')) - return _decrypt_and_deserialize_entity(root, property_resolver, require_encryption, - key_encryption_key, key_resolver) - - -def _convert_json_to_entity(entry_element, property_resolver, encrypted_properties): - ''' Convert json response to entity. - - The entity format is: - { - "Address":"Mountain View", - "Age":23, - "AmountDue":200.23, - "CustomerCode@odata.type":"Edm.Guid", - "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833", - "CustomerSince@odata.type":"Edm.DateTime", - "CustomerSince":"2008-07-10T00:00:00", - "IsActive":true, - "NumberOfOrders@odata.type":"Edm.Int64", - "NumberOfOrders":"255", - "PartitionKey":"mypartitionkey", - "RowKey":"myrowkey" - } - ''' - entity = Entity() - - properties = {} - edmtypes = {} - odata = {} - - for name, value in entry_element.items(): - if name.startswith('odata.'): - odata[name[6:]] = value - elif name.endswith('@odata.type'): - edmtypes[name[:-11]] = value - else: - properties[name] = value - - # Partition key is a known property - partition_key = properties.pop('PartitionKey', None) - if partition_key: - entity['PartitionKey'] = partition_key - - # Row key is a known property - row_key = properties.pop('RowKey', None) - if row_key: - entity['RowKey'] = row_key - - # Timestamp is a known property - timestamp = properties.pop('Timestamp', None) - if timestamp: - entity['Timestamp'] = _from_entity_datetime(timestamp) - - for name, value in properties.items(): - mtype = edmtypes.get(name); - - # use the property resolver if present - if property_resolver: - # Clients are not expected to resolve these interal fields. - # This check avoids unexpected behavior from the user-defined - # property resolver. 
- if not (name == '_ClientEncryptionMetadata1' or \ - name == '_ClientEncryptionMetadata2'): - mtype = property_resolver(partition_key, row_key, - name, value, mtype) - - # throw if the type returned is not a valid edm type - if mtype and mtype not in _EDM_TYPES: - raise AzureException(_ERROR_TYPE_NOT_SUPPORTED.format(mtype)) - - # If the property was encrypted, supercede the results of the resolver and set as binary - if encrypted_properties is not None and name in encrypted_properties: - mtype = EdmType.BINARY - - # Add type for Int32 - if type(value) is int: - mtype = EdmType.INT32 - - # no type info, property should parse automatically - if not mtype: - entity[name] = value - else: # need an object to hold the property - conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype) - if conv is not None: - try: - property = conv(value) - except Exception as e: - # throw if the type returned by the property resolver - # cannot be used in the conversion - if property_resolver: - raise AzureException( - _ERROR_INVALID_PROPERTY_RESOLVER.format(name, value, mtype)) - else: - raise e - else: - property = EntityProperty(mtype, value) - entity[name] = property - - # extract etag from entry - etag = odata.get('etag') - if timestamp: - etag = 'W/"datetime\'' + url_quote(timestamp) + '\'"' - entity['etag'] = etag - - return entity - - -def _convert_json_response_to_tables(response): - ''' Converts the response to tables class. - ''' - if response is None or response.body is None: - return None - - tables = _list() - - continuation = _get_continuation_from_response_headers(response) - tables.next_marker = continuation.get('nexttablename') - - root = loads(response.body.decode('utf-8')) - - if 'TableName' in root: - table = Table() - table.name = root['TableName'] - tables.append(table) - else: - for element in root['value']: - table = Table() - table.name = element['TableName'] - tables.append(table) - - return tables - - -def _convert_json_response_to_entities(response, property_resolver, require_encryption, - key_encryption_key, key_resolver): - ''' Converts the response to tables class. 
- ''' - if response is None or response.body is None: - return None - - entities = _list() - - entities.next_marker = _get_continuation_from_response_headers(response) - - root = loads(response.body.decode('utf-8')) - - if 'value' in root: - for entity in root['value']: - entity = _decrypt_and_deserialize_entity(entity, property_resolver, require_encryption, - key_encryption_key, key_resolver) - entities.append(entity) - - else: - entities.append(_convert_json_to_entity(entity, - property_resolver)) - - return entities - -def _decrypt_and_deserialize_entity(entity, property_resolver, require_encryption, - key_encryption_key, key_resolver): - try: - _validate_decryption_required(require_encryption, key_encryption_key, - key_resolver) - entity_iv, encrypted_properties, content_encryption_key, isJavaV1 = None, None, None, False - if (key_encryption_key is not None) or (key_resolver is not None): - entity_iv, encrypted_properties, content_encryption_key, isJavaV1 = \ - _extract_encryption_metadata(entity, require_encryption, key_encryption_key, key_resolver) - except: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - - entity = _convert_json_to_entity(entity, property_resolver, encrypted_properties) - - if entity_iv is not None and encrypted_properties is not None and \ - content_encryption_key is not None: - try: - entity = _decrypt_entity(entity, encrypted_properties, content_encryption_key, - entity_iv, isJavaV1) - except: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - - return entity - -def _extract_etag(response): - ''' Extracts the etag from the response headers. ''' - if response and response.headers: - return response.headers.get('etag') - - return None - -def _parse_batch_response(response): - if response is None or response.body is None: - return None - - parts = response.body.split(b'--changesetresponse_') - - responses = [] - for part in parts: - httpLocation = part.find(b'HTTP/') - if httpLocation > 0: - response_part = _parse_batch_response_part(part[httpLocation:]) - if response_part.status >= 300: - _parse_batch_error(response_part) - responses.append(_extract_etag(response_part)) - - return responses - -def _parse_batch_response_part(part): - lines = part.splitlines(); - - # First line is the HTTP status/reason - status, _, reason = lines[0].partition(b' ')[2].partition(b' ') - - # Followed by headers and body - headers = {} - body = b'' - isBody = False - for line in lines[1:]: - if line == b'' and not isBody: - isBody = True - elif isBody: - body += line - else: - headerName, _, headerVal = line.partition(b': ') - headers[headerName.lower().decode("utf-8")] = headerVal.decode("utf-8") - - return HTTPResponse(int(status), reason.strip(), headers, body) - -def _parse_batch_error(part): - doc = loads(part.body.decode('utf-8')) - - code = '' - message = '' - error = doc.get('odata.error') - if error: - code = error.get('code') - if error.get('message'): - message = error.get('message').get('value') - - raise AzureBatchOperationError(message, part.status, code) diff --git a/azure/multiapi/storage/v2016_05_31/table/_encryption.py b/azure/multiapi/storage/v2016_05_31/table/_encryption.py deleted file mode 100644 index 8e52168..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/_encryption.py +++ /dev/null @@ -1,299 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -from .._error import( - _ERROR_UNSUPPORTED_ENCRYPTION_VERSION, - _ERROR_DECRYPTION_FAILURE, - _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM, - _ERROR_DATA_NOT_ENCRYPTED, - _validate_not_none, - _validate_key_encryption_key_wrap, - _validate_key_encryption_key_unwrap, - _validate_kek_id, -) -from .._constants import( - _ENCRYPTION_PROTOCOL_V1, -) -from .._common_conversion import( - _decode_base64_to_bytes, -) -from .._encryption import( - _generate_encryption_data_dict, - _dict_to_encryption_data, - _generate_AES_CBC_cipher, - _validate_and_unwrap_cek, - _EncryptionData, - _EncryptionAgent, - _WrappedContentKey, - _EncryptionAlgorithm -) -from ._error import( - _ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION, -) -from .models import( - Entity, - EntityProperty, - EdmType, -) -from json import( - dumps, - loads, -) -import os -from copy import deepcopy -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.padding import PKCS7 -from cryptography.hazmat.primitives.hashes import( - Hash, - SHA256, -) - -def _encrypt_entity(entity, key_encryption_key, encryption_resolver): - ''' - Encrypts the given entity using AES256 in CBC mode with 128 bit padding. - Will generate a content-encryption-key (cek) to encrypt the properties either - stored in an EntityProperty with the 'encrypt' flag set or those - specified by the encryption resolver. This cek is then wrapped using the - provided key_encryption_key (kek). Only strings may be encrypted and the - result is stored as binary on the service. - - :param entity: - The entity to insert. Could be a dict or an entity object. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param function(partition_key, row_key, property_name) encryption_resolver: - A function that takes in an entities partition key, row key, and property name and returns - a boolean that indicates whether that property should be encrypted. - :return: An entity with both the appropriate properties encrypted and the - encryption data. - :rtype: object - ''' - - _validate_not_none('entity', entity) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - entity_initialization_vector = os.urandom(16) - - encrypted_properties = [] - encrypted_entity = Entity() - for key, value in entity.items(): - # If the property resolver says it should be encrypted - # or it is an EntityProperty with the 'encrypt' property set. 
- if (isinstance(value, EntityProperty) and value.encrypt) or \ - (encryption_resolver is not None \ - and encryption_resolver(entity['PartitionKey'], entity['RowKey'], key)): - - # Only strings can be encrypted and None is not an instance of str. - if isinstance(value, EntityProperty): - if value.type == EdmType.STRING: - value = value.value - else: - raise ValueError(_ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION) - if not isinstance(value, str): - raise ValueError(_ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION) - - # Value is now confirmed to hold a valid string value to be encrypted - # and should be added to the list of encrypted properties. - encrypted_properties.append(key) - - propertyIV = _generate_property_iv(entity_initialization_vector, - entity['PartitionKey'], entity['RowKey'], - key, False) - - # Encode the strings for encryption. - value = value.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, propertyIV) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(value) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Set the new value of this key to be a binary EntityProperty for proper serialization. - value = EntityProperty(EdmType.BINARY, encrypted_data) - - encrypted_entity[key] = value - - encrypted_properties = dumps(encrypted_properties) - - # Generate the metadata iv. - metadataIV = _generate_property_iv(entity_initialization_vector, - entity['PartitionKey'], entity['RowKey'], - '_ClientEncryptionMetadata2', False) - - encrypted_properties = encrypted_properties.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, metadataIV) - - padder = PKCS7(128).padder() - padded_data = padder.update(encrypted_properties) + padder.finalize() - - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - encrypted_entity['_ClientEncryptionMetadata2'] = EntityProperty(EdmType.BINARY, encrypted_data) - - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - entity_initialization_vector) - - encrypted_entity['_ClientEncryptionMetadata1'] = dumps(encryption_data) - return encrypted_entity - -def _decrypt_entity(entity, encrypted_properties_list, content_encryption_key, entityIV, isJavaV1): - ''' - Decrypts the specified entity using AES256 in CBC mode with 128 bit padding. Unwraps the CEK - using either the specified KEK or the key returned by the key_resolver. Properties - specified in the encrypted_properties_list, will be decrypted and decoded to utf-8 strings. - - :param entity: - The entity being retrieved and decrypted. Could be a dict or an entity object. - :param list encrypted_properties_list: - The encrypted list of all the properties that are encrypted. - :param bytes[] content_encryption_key: - The key used internally to encrypt the entity. Extrated from the entity metadata. - :param bytes[] entityIV: - The intialization vector used to seed the encryption algorithm. Extracted from the - entity metadata. 
- :return: The decrypted entity - :rtype: Entity - ''' - - _validate_not_none('entity', entity) - - decrypted_entity = deepcopy(entity) - try: - for property in entity.keys(): - if property in encrypted_properties_list: - value = entity[property] - - propertyIV = _generate_property_iv(entityIV, - entity['PartitionKey'], entity['RowKey'], - property, isJavaV1) - cipher = _generate_AES_CBC_cipher(content_encryption_key, - propertyIV) - - # Decrypt the property. - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(value.value) + decryptor.finalize()) - - # Unpad the data. - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - decrypted_data = decrypted_data.decode('utf-8') - - decrypted_entity[property] = decrypted_data - - decrypted_entity.pop('_ClientEncryptionMetadata1') - decrypted_entity.pop('_ClientEncryptionMetadata2') - return decrypted_entity - except: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - -def _extract_encryption_metadata(entity, require_encryption, key_encryption_key, key_resolver): - ''' - Extracts the encryption metadata from the given entity, setting them to be utf-8 strings. - If no encryption metadata is present, will return None for all return values unless - require_encryption is true, in which case the method will throw. - - :param entity: - The entity being retrieved and decrypted. Could be a dict or an entity object. - :param bool require_encryption: - If set, will enforce that the retrieved entity is encrypted and decrypt it. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the - string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :param function key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing - the interface defined above. - :returns: a tuple containing the entity iv, the list of encrypted properties, the entity cek, - and whether the entity was encrypted using JavaV1. - :rtype: tuple (bytes[], list, bytes[], bool) - ''' - _validate_not_none('entity', entity) - - try: - encrypted_properties_list = _decode_base64_to_bytes(entity['_ClientEncryptionMetadata2']) - encryption_data = entity['_ClientEncryptionMetadata1'] - encryption_data = _dict_to_encryption_data(loads(encryption_data)) - except Exception as e: - # Message did not have properly formatted encryption metadata. - if require_encryption: - raise ValueError(_ERROR_ENTITY_NOT_ENCRYPTED) - else: - return (None,None,None,None) - - if not(encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - - # Special check for compatibility with Java V1 encryption protocol. 
- isJavaV1 = (encryption_data.key_wrapping_metadata is None) or \ - ((encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1) and \ - 'EncryptionLibrary' in encryption_data.key_wrapping_metadata and \ - 'Java' in encryption_data.key_wrapping_metadata['EncryptionLibrary']) - - metadataIV = _generate_property_iv(encryption_data.content_encryption_IV, - entity['PartitionKey'], entity['RowKey'], - '_ClientEncryptionMetadata2', isJavaV1) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, metadataIV) - - # Decrypt the data. - decryptor = cipher.decryptor() - encrypted_properties_list = decryptor.update(encrypted_properties_list) + decryptor.finalize() - - # Unpad the data. - unpadder = PKCS7(128).unpadder() - encrypted_properties_list = unpadder.update(encrypted_properties_list) + unpadder.finalize() - - encrypted_properties_list = encrypted_properties_list.decode('utf-8') - - if isJavaV1: - # Strip the square braces from the ends and split string into list. - encrypted_properties_list = encrypted_properties_list[1:-1] - encrypted_properties_list = encrypted_properties_list.split(', ') - else: - encrypted_properties_list = loads(encrypted_properties_list) - - return (encryption_data.content_encryption_IV, encrypted_properties_list, content_encryption_key, isJavaV1) - -def _generate_property_iv(entity_iv, pk, rk, property_name, isJavaV1): - ''' - Uses the entity_iv, partition key, and row key to generate and return - the iv for the specified property. - ''' - digest = Hash(SHA256(), default_backend()) - if not isJavaV1: - digest.update(entity_iv + - (rk + pk + property_name).encode('utf-8')) - else: - digest.update(entity_iv + - (pk + rk + property_name).encode('utf-8')) - propertyIV = digest.finalize() - return propertyIV[:16] \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/table/_error.py b/azure/multiapi/storage/v2016_05_31/table/_error.py deleted file mode 100644 index 6da7888..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/_error.py +++ /dev/null @@ -1,73 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- - -from .._error import ( - _validate_not_none, - _ERROR_VALUE_NONE_OR_EMPTY, -) - -_ERROR_ATTRIBUTE_MISSING = '\'{0}\' object has no attribute \'{1}\'' -_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail' -_ERROR_CANNOT_FIND_PARTITION_KEY = 'Cannot find partition key in request.' -_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.' -_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = \ - 'Cannot serialize the specified value ({0}) to an entity. Please use ' + \ - 'an EntityProperty (which can specify custom types), int, str, bool, ' + \ - 'or datetime.' 
-_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = \ - 'Row Keys should not be the same in a batch operations' -_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = \ - 'Partition Key should be the same in a batch operations' -_ERROR_INVALID_ENTITY_TYPE = 'The entity must be either in dict format or an entity object.' -_ERROR_INVALID_PROPERTY_RESOLVER = \ - 'The specified property resolver returned an invalid type. Name: {0}, Value: {1}, ' + \ - 'EdmType: {2}' -_ERROR_PROPERTY_NAME_TOO_LONG = 'The property name exceeds the maximum allowed length.' -_ERROR_TOO_MANY_ENTITIES_IN_BATCH = \ - 'Batches may only contain 100 operations' -_ERROR_TOO_MANY_PROPERTIES = 'The entity contains more properties than allowed.' -_ERROR_TYPE_NOT_SUPPORTED = 'Type not supported when sending data to the service: {0}.' -_ERROR_VALUE_TOO_LARGE = '{0} is too large to be cast to type {1}.' -_ERROR_UNSUPPORTED_TYPE_FOR_ENCRYPTION = 'Encryption is only supported for not None strings.' - -def _validate_object_has_param(param_name, object): - if not object.get(param_name): - raise ValueError(_ERROR_VALUE_NONE_OR_EMPTY.format(param_name)) - -def _validate_entity(entity, encrypt=None): - # Validate entity exists - _validate_not_none('entity', entity) - - # Entity inherits from dict, so just validating dict is fine - if not isinstance(entity, dict): - raise TypeError(_ERROR_INVALID_ENTITY_TYPE) - - # Validate partition key and row key are present - _validate_object_has_param('PartitionKey', entity) - _validate_object_has_param('RowKey', entity) - - # Two properties are added during encryption. Validate sufficient space - max_properties = 255 - if(encrypt): - max_properties = max_properties - 2 - - # Validate there are not more than 255 properties including Timestamp - if (len(entity) > max_properties) or (len(entity) > (max_properties - 1) and not 'Timestamp' in entity): - raise ValueError(_ERROR_TOO_MANY_PROPERTIES) - - # Validate the property names are not too long - for propname in entity: - if len(propname) > 255: - raise ValueError(_ERROR_PROPERTY_NAME_TOO_LONG) diff --git a/azure/multiapi/storage/v2016_05_31/table/_request.py b/azure/multiapi/storage/v2016_05_31/table/_request.py deleted file mode 100644 index 741b4c5..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/_request.py +++ /dev/null @@ -1,198 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#-------------------------------------------------------------------------- -from .._http import HTTPRequest -from .._common_conversion import ( - _to_str, -) -from .._error import ( - _validate_not_none, - _validate_encryption_required, - _validate_encryption_unsupported, -) -from .._serialization import ( - _get_request_body, -) -from ._error import ( - _validate_entity, -) -from ._serialization import ( - _convert_entity_to_json, - _DEFAULT_ACCEPT_HEADER, - _DEFAULT_CONTENT_TYPE_HEADER, - _DEFAULT_PREFER_HEADER, -) -from ._encryption import ( - _encrypt_entity, -) - -def _get_entity(partition_key, row_key, select, accept): - ''' - Constructs a get entity request. - ''' - _validate_not_none('partition_key', partition_key) - _validate_not_none('row_key', row_key) - _validate_not_none('accept', accept) - request = HTTPRequest() - request.method = 'GET' - request.headers = {'Accept': _to_str(accept)} - request.query = {'$select': _to_str(select)} - - return request - -def _insert_entity(entity, encryption_required=False, - key_encryption_key=None, encryption_resolver=None): - ''' - Constructs an insert entity request. - :param entity: - The entity to insert. Could be a dict or an entity object. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param function(partition_key, row_key, property_name) encryption_resolver: - A function that takes in an entities partition key, row key, and property name and returns - a boolean that indicates whether that property should be encrypted. - ''' - _validate_entity(entity, key_encryption_key is not None) - _validate_encryption_required(encryption_required, key_encryption_key) - - request = HTTPRequest() - request.method = 'POST' - request.headers = { - _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1], - _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1], - _DEFAULT_PREFER_HEADER[0]: _DEFAULT_PREFER_HEADER[1] - } - if(key_encryption_key): - entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver) - request.body = _get_request_body(_convert_entity_to_json(entity)) - - return request - -def _update_entity(entity, if_match, encryption_required=False, - key_encryption_key=None, encryption_resolver=None): - ''' - Constructs an update entity request. - :param entity: - The entity to insert. Could be a dict or an entity object. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param function(partition_key, row_key, property_name) encryption_resolver: - A function that takes in an entities partition key, row key, and property name and returns - a boolean that indicates whether that property should be encrypted. 
- ''' - _validate_not_none('if_match', if_match) - _validate_entity(entity, key_encryption_key is not None) - _validate_encryption_required(encryption_required, key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.headers = { - _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1], - _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1], - 'If-Match': _to_str(if_match), - } - if(key_encryption_key): - entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver) - request.body = _get_request_body(_convert_entity_to_json(entity)) - - return request - -def _merge_entity(entity, if_match, require_encryption=False, key_encryption_key=None): - ''' - Constructs a merge entity request. - ''' - _validate_not_none('if_match', if_match) - _validate_entity(entity) - _validate_encryption_unsupported(require_encryption, key_encryption_key) - - request = HTTPRequest() - request.method = 'MERGE' - request.headers = { - _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1], - _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1], - 'If-Match': _to_str(if_match) - } - request.body = _get_request_body(_convert_entity_to_json(entity)) - - return request - -def _delete_entity(partition_key, row_key, if_match): - ''' - Constructs a delete entity request. - ''' - _validate_not_none('if_match', if_match) - _validate_not_none('partition_key', partition_key) - _validate_not_none('row_key', row_key) - request = HTTPRequest() - request.method = 'DELETE' - request.headers = { - _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1], - 'If-Match': _to_str(if_match) - } - - return request - -def _insert_or_replace_entity(entity, require_encryption=False, - key_encryption_key=None, encryption_resolver=None): - ''' - Constructs an insert or replace entity request. - ''' - _validate_entity(entity, key_encryption_key is not None) - _validate_encryption_required(require_encryption, key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.headers = { - _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1], - _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1], - } - - if(key_encryption_key): - entity = _encrypt_entity(entity, key_encryption_key, encryption_resolver) - request.body = _get_request_body(_convert_entity_to_json(entity)) - - return request - -def _insert_or_merge_entity(entity, require_encryption=False, key_encryption_key=None): - ''' - Constructs an insert or merge entity request. - :param entity: - The entity to insert. Could be a dict or an entity object. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param function(partition_key, row_key, property_name) encryption_resolver: - A function that takes in an entities partition key, row key, and property name and returns - a boolean that indicates whether that property should be encrypted. 
- ''' - _validate_entity(entity) - _validate_encryption_unsupported(require_encryption, key_encryption_key) - - request = HTTPRequest() - request.method = 'MERGE' - request.headers = { - _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1], - _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1], - } - request.body = _get_request_body(_convert_entity_to_json(entity)) - - return request \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/table/_serialization.py b/azure/multiapi/storage/v2016_05_31/table/_serialization.py deleted file mode 100644 index ea1b334..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/_serialization.py +++ /dev/null @@ -1,254 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -import sys -import types -import uuid - -from datetime import datetime -from json import ( - dumps, -) -from math import( - isnan, -) -from .._common_conversion import ( - _encode_base64, - _to_str, -) -from .._serialization import ( - _to_utc_datetime, -) -from ._error import ( - _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY, - _ERROR_TYPE_NOT_SUPPORTED, - _ERROR_VALUE_TOO_LARGE, -) -from .models import ( - EntityProperty, - TablePayloadFormat, - EdmType, -) - -if sys.version_info < (3,): - def _new_boundary(): - return str(uuid.uuid1()) -else: - def _new_boundary(): - return str(uuid.uuid1()).encode('utf-8') - -_DEFAULT_ACCEPT_HEADER = ('Accept', TablePayloadFormat.JSON_MINIMAL_METADATA) -_DEFAULT_CONTENT_TYPE_HEADER = ('Content-Type', 'application/json') -_DEFAULT_PREFER_HEADER = ('Prefer', 'return-no-content') -_SUB_HEADERS = ['If-Match', 'Prefer', 'Accept', 'Content-Type', 'DataServiceVersion'] - -def _get_entity_path(table_name, partition_key, row_key): - return '/{0}(PartitionKey=\'{1}\',RowKey=\'{2}\')'.format( - _to_str(table_name), - _to_str(partition_key), - _to_str(row_key)) - -def _update_storage_table_header(request): - ''' add additional headers for storage table request. 
''' - - # set service version - request.headers['DataServiceVersion'] = '3.0;NetFx' - request.headers['MaxDataServiceVersion'] = '3.0' - -def _to_entity_binary(value): - return EdmType.BINARY, _encode_base64(value) - -def _to_entity_bool(value): - return None, value - -def _to_entity_datetime(value): - return EdmType.DATETIME, _to_utc_datetime(value) - -def _to_entity_float(value): - if isnan(value): - return EdmType.DOUBLE, 'NaN' - if value == float('inf'): - return EdmType.DOUBLE, 'Infinity' - if value == float('-inf'): - return EdmType.DOUBLE, '-Infinity' - return None, value - -def _to_entity_guid(value): - return EdmType.GUID, str(value) - -def _to_entity_int32(value): - if sys.version_info < (3,): - value = long(value) - else: - value = int(value) - if value >= 2**31 or value < -(2**31): - raise TypeError(_ERROR_VALUE_TOO_LARGE.format(str(value), EdmType.INT32)) - return None, value - -def _to_entity_int64(value): - if sys.version_info < (3,): - ivalue = long(value) - else: - ivalue = int(value) - if ivalue >= 2**63 or ivalue < -(2**63): - raise TypeError(_ERROR_VALUE_TOO_LARGE.format(str(value), EdmType.INT64)) - return EdmType.INT64, str(value) - -def _to_entity_str(value): - return None, value - -def _to_entity_none(value): - return None, None - -# Conversion from Python type to a function which returns a tuple of the -# type string and content string. -_PYTHON_TO_ENTITY_CONVERSIONS = { - int: _to_entity_int64, - bool: _to_entity_bool, - datetime: _to_entity_datetime, - float: _to_entity_float, - str: _to_entity_str, -} - -# Conversion from Edm type to a function which returns a tuple of the -# type string and content string. -_EDM_TO_ENTITY_CONVERSIONS = { - EdmType.BINARY: _to_entity_binary, - EdmType.BOOLEAN: _to_entity_bool, - EdmType.DATETIME: _to_entity_datetime, - EdmType.DOUBLE: _to_entity_float, - EdmType.GUID: _to_entity_guid, - EdmType.INT32: _to_entity_int32, - EdmType.INT64: _to_entity_int64, - EdmType.STRING: _to_entity_str, -} - -if sys.version_info < (3,): - _PYTHON_TO_ENTITY_CONVERSIONS.update({ - long: _to_entity_int64, - types.NoneType: _to_entity_none, - unicode: _to_entity_str, - }) - - -def _convert_entity_to_json(source): - ''' Converts an entity object to json to send. - The entity format is: - { - "Address":"Mountain View", - "Age":23, - "AmountDue":200.23, - "CustomerCode@odata.type":"Edm.Guid", - "CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833", - "CustomerSince@odata.type":"Edm.DateTime", - "CustomerSince":"2008-07-10T00:00:00", - "IsActive":true, - "NumberOfOrders@odata.type":"Edm.Int64", - "NumberOfOrders":"255", - "PartitionKey":"mypartitionkey", - "RowKey":"myrowkey" - } - ''' - - properties = {} - - # set properties type for types we know if value has no type info. 
- # if value has type info, then set the type to value.type - for name, value in source.items(): - mtype = '' - - if isinstance(value, EntityProperty): - conv = _EDM_TO_ENTITY_CONVERSIONS.get(value.type) - if conv is None: - raise TypeError( - _ERROR_TYPE_NOT_SUPPORTED.format(value.type)) - mtype, value = conv(value.value) - else: - conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value)) - if conv is None and sys.version_info >= (3,) and value is None: - conv = _to_entity_none - if conv is None: - raise TypeError( - _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format( - type(value).__name__)) - mtype, value = conv(value) - - # form the property node - properties[name] = value - if mtype: - properties[name + '@odata.type'] = mtype - - # generate the entity_body - return dumps(properties) - - -def _convert_table_to_json(table_name): - ''' - Create json to send for a given table name. Since json format for table is - the same as entity and the only difference is that table has only one - property 'TableName', so we just call _convert_entity_to_json. - - table_name: - the name of the table - ''' - return _convert_entity_to_json({'TableName': table_name}) - -def _convert_batch_to_json(batch_requests): - ''' - Create json to send for an array of batch requests. - - batch_requests: - an array of requests - ''' - batch_boundary = b'batch_' + _new_boundary() - changeset_boundary = b'changeset_' + _new_boundary() - - body = [] - body.append(b'--' + batch_boundary + b'\n') - body.append(b'Content-Type: multipart/mixed; boundary=') - body.append(changeset_boundary + b'\n\n') - - content_id = 1 - - # Adds each request body to the POST data. - for _, request in batch_requests: - body.append(b'--' + changeset_boundary + b'\n') - body.append(b'Content-Type: application/http\n') - body.append(b'Content-Transfer-Encoding: binary\n\n') - body.append(request.method.encode('utf-8')) - body.append(b' ') - body.append(request.path.encode('utf-8')) - body.append(b' HTTP/1.1\n') - body.append(b'Content-ID: ') - body.append(str(content_id).encode('utf-8') + b'\n') - content_id += 1 - - for name, value in request.headers.items(): - if name in _SUB_HEADERS: - body.append(name.encode('utf-8') + b': ') - body.append(value.encode('utf-8') + b'\n') - - # Add different headers for different request types. - if not request.method == 'DELETE': - body.append(b'Content-Length: ') - body.append(str(len(request.body)).encode('utf-8')) - body.append(b'\n\n') - body.append(request.body + b'\n') - - body.append(b'\n') - - body.append(b'--' + changeset_boundary + b'--' + b'\n') - body.append(b'--' + batch_boundary + b'--') - - return b''.join(body), 'multipart/mixed; boundary=' + batch_boundary.decode('utf-8') diff --git a/azure/multiapi/storage/v2016_05_31/table/models.py b/azure/multiapi/storage/v2016_05_31/table/models.py deleted file mode 100644 index 6a3378f..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/models.py +++ /dev/null @@ -1,206 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
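For reference, the serialization helpers above produce the OData JSON body shown in the ``_convert_entity_to_json`` docstring. A minimal, self-contained sketch of that payload shape (values are placeholders), following the inference rules above: plain Python ints are annotated as ``Edm.Int64`` and sent as strings, while floats, bools and strings carry no annotation::

    import json

    # Placeholder illustration of the JSON body the removed
    # _convert_entity_to_json helper would emit for a simple entity.
    payload = {
        "PartitionKey": "mypartitionkey",   # str: no @odata.type annotation
        "RowKey": "myrowkey",
        "AmountDue": 200.23,                # float: no annotation (Edm.Double inferred)
        "IsActive": True,                   # bool: no annotation (Edm.Boolean inferred)
        "NumberOfOrders@odata.type": "Edm.Int64",
        "NumberOfOrders": "255",            # plain Python ints are sent as Edm.Int64 strings
        "CustomerSince@odata.type": "Edm.DateTime",
        "CustomerSince": "2008-07-10T00:00:00",
    }
    print(json.dumps(payload))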
-# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from azure.common import ( - AzureException, - AzureHttpError, -) -from ._error import ( - _ERROR_ATTRIBUTE_MISSING, -) - -class AzureBatchValidationError(AzureException): - ''' - Indicates that a batch operation cannot proceed due to invalid input. - - :ivar str message: - A detailed error message indicating the reason for the failure. - ''' - -class AzureBatchOperationError(AzureHttpError): - - ''' - Indicates that a batch operation failed. - - :ivar str message: - A detailed error message indicating the index of the batch - request which failed and the reason for the failure. For example, - '0:One of the request inputs is out of range.' indicates the 0th batch - request failed as one of its property values was out of range. - :ivar int status_code: - The HTTP status code of the batch request. For example, 400. - :ivar str batch_code: - The batch status code. For example, 'OutOfRangeInput'. - ''' - - def __init__(self, message, status_code, batch_code): - super(AzureBatchOperationError, self).__init__(message, status_code) - self.code = batch_code - -class Entity(dict): - ''' - An entity object. Can be accessed as a dict or as an obj. The attributes of - the entity will be created dynamically. For example, the following are both - valid:: - entity = Entity() - entity.a = 'b' - entity['x'] = 'y' - ''' - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - raise AttributeError(_ERROR_ATTRIBUTE_MISSING.format('Entity', name)) - - __setattr__ = dict.__setitem__ - - def __delattr__(self, name): - try: - del self[name] - except KeyError: - raise AttributeError(_ERROR_ATTRIBUTE_MISSING.format('Entity', name)) - - def __dir__(self): - return dir({}) + list(self.keys()) - - -class EntityProperty(object): - ''' - An entity property. Used to explicitly set :class:`~EdmType` when necessary. - - Values which require explicit typing are GUID, INT32, and BINARY. Other EdmTypes - may be explicitly created as EntityProperty objects but need not be. For example, - the below will both create STRING typed properties on the entity:: - entity = Entity() - entity.a = 'b' - entity.x = EntityProperty(EdmType.STRING, 'y') - ''' - - def __init__(self, type=None, value=None, encrypt=False): - ''' - Constructs an entity property with an explicitly set type. - - :param str type: The EdmType of the property. - :param value: The value of the property. - :param bool encrypt: Indicates whether or not the property should be encrypted. - ''' - self.type = type - self.value = value - self.encrypt = encrypt - - -class Table(object): - ''' - Represents an Azure Table. Returned by list_tables. - - :ivar str name: The name of the table. - ''' - pass - - -class TablePayloadFormat(object): - ''' - Specifies the accepted content type of the response payload.
More information - can be found here: https://msdn.microsoft.com/en-us/library/azure/dn535600.aspx - ''' - - JSON_NO_METADATA = 'application/json;odata=nometadata' - '''Returns no type information for the entity properties.''' - - JSON_MINIMAL_METADATA = 'application/json;odata=minimalmetadata' - '''Returns minimal type information for the entity properties.''' - - JSON_FULL_METADATA = 'application/json;odata=fullmetadata' - '''Returns full type information for the entity properties plus some extra odata properties.''' - - -class EdmType(object): - ''' - Used by :class:`~.EntityProperty` to represent the type of the entity property - to be stored by the Table service. - ''' - - BINARY = 'Edm.Binary' - ''' Represents byte data. Must be specified. ''' - - INT64 = 'Edm.Int64' - ''' Represents a number between -(2^63) and 2^63. This is the default type for Python numbers. ''' - - GUID = 'Edm.Guid' - ''' Represents a GUID. Must be specified. ''' - - DATETIME = 'Edm.DateTime' - ''' Represents a date. This type will be inferred for Python datetime objects. ''' - - STRING = 'Edm.String' - ''' Represents a string. This type will be inferred for Python strings. ''' - - INT32 = 'Edm.Int32' - ''' Represents a number between -(2^31) and 2^31. Must be specified or numbers will default to INT64. ''' - - DOUBLE = 'Edm.Double' - ''' Represents a double. This type will be inferred for Python floating point numbers. ''' - - BOOLEAN = 'Edm.Boolean' - ''' Represents a boolean. This type will be inferred for Python bools. ''' - - -class TablePermissions(object): - ''' - TablePermissions class to be used with the :func:`~azure.storage.table.tableservice.TableService.generate_table_shared_access_signature` - method and for the AccessPolicies used with :func:`~azure.storage.table.tableservice.TableService.set_table_acl`. - - :ivar TablePermissions TablePermissions.QUERY: Get entities and query entities. - :ivar TablePermissions TablePermissions.ADD: Add entities. - :ivar TablePermissions TablePermissions.UPDATE: Update entities. - :ivar TablePermissions TablePermissions.DELETE: Delete entities. - ''' - - def __init__(self, query=False, add=False, update=False, delete=False, _str=None): - ''' - :param bool query: - Get entities and query entities. - :param bool add: - Add entities. Add and Update permissions are required for upsert operations. - :param bool update: - Update entities. Add and Update permissions are required for upsert operations. - :param bool delete: - Delete entities. - :param str _str: - A string representing the permissions.
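A minimal sketch of how these removed models were used to force an explicit Edm type on a property; GUID, INT32 and BINARY always need an explicit ``EntityProperty``, other types are inferred (the table name and values below are placeholders)::

    import uuid
    from azure.multiapi.storage.v2016_05_31.table.models import (
        Entity,
        EntityProperty,
        EdmType,
    )

    entity = Entity()
    entity.PartitionKey = 'tasks'
    entity.RowKey = '001'
    entity.description = 'clean the kitchen'            # inferred as Edm.String
    entity.priority = EntityProperty(EdmType.INT32, 1)  # must be typed explicitly
    entity.job_id = EntityProperty(EdmType.GUID, str(uuid.uuid4()))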
- ''' - if not _str: - _str = '' - self.query = query or ('r' in _str) - self.add = add or ('a' in _str) - self.update = update or ('u' in _str) - self.delete = delete or ('d' in _str) - - def __or__(self, other): - return TablePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return TablePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.query else '') + - ('a' if self.add else '') + - ('u' if self.update else '') + - ('d' if self.delete else '')) - -TablePermissions.QUERY = TablePermissions(query=True) -TablePermissions.ADD = TablePermissions(add=True) -TablePermissions.UPDATE = TablePermissions(update=True) -TablePermissions.DELETE = TablePermissions(delete=True) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/table/tablebatch.py b/azure/multiapi/storage/v2016_05_31/table/tablebatch.py deleted file mode 100644 index ecaa7b9..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/tablebatch.py +++ /dev/null @@ -1,209 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- -from ._error import ( - _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH, - _ERROR_DUPLICATE_ROW_KEY_IN_BATCH, - _ERROR_TOO_MANY_ENTITIES_IN_BATCH, -) -from .models import ( - AzureBatchValidationError, -) -from ._request import ( - _insert_entity, - _update_entity, - _merge_entity, - _delete_entity, - _insert_or_replace_entity, - _insert_or_merge_entity, -) - -class TableBatch(object): - - ''' - This is the class that is used for batch operation for storage table service. - - The Table service supports batch transactions on entities that are in the - same table and belong to the same partition group. Multiple operations are - supported within a single transaction. The batch can include at most 100 - entities, and its total payload may be no more than 4 MB in size. - ''' - - def __init__(self, require_encryption=False, key_encryption_key=None, - encryption_resolver=None): - self._requests = [] - self._partition_key = None - self._row_keys = [] - self._require_encryption = require_encryption - self._key_encryption_key = key_encryption_key - self._encryption_resolver = encryption_resolver - - def insert_entity(self, entity): - ''' - Adds an insert entity operation to the batch. See - :func:`~azure.storage.table.tableservice.TableService.insert_entity` for more - information on inserts. - - The operation will not be executed until the batch is committed. - - :param entity: - The entity to insert. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. 
- :type entity: a dict or :class:`azure.storage.table.models.Entity` - ''' - request = _insert_entity(entity, self._require_encryption, self._key_encryption_key, - self._encryption_resolver) - self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request) - - def update_entity(self, entity, if_match='*'): - ''' - Adds an update entity operation to the batch. See - :func:`~azure.storage.table.tableservice.TableService.update_entity` for more - information on updates. - - The operation will not be executed until the batch is committed. - - :param entity: - The entity to update. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. - :type entity: a dict or :class:`azure.storage.table.models.Entity` - :param str if_match: - The client may specify the ETag for the entity on the - request in order to compare to the ETag maintained by the service - for the purpose of optimistic concurrency. The update operation - will be performed only if the ETag sent by the client matches the - value maintained by the server, indicating that the entity has - not been modified since it was retrieved by the client. To force - an unconditional update, set If-Match to the wildcard character (*). - ''' - request = _update_entity(entity, if_match, self._require_encryption, - self._key_encryption_key, self._encryption_resolver) - self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request) - - def merge_entity(self, entity, if_match='*'): - ''' - Adds a merge entity operation to the batch. See - :func:`~azure.storage.table.tableservice.TableService.merge_entity` for more - information on merges. - - The operation will not be executed until the batch is committed. - - :param entity: - The entity to merge. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. - :type entity: a dict or :class:`azure.storage.table.models.Entity` - :param str if_match: - The client may specify the ETag for the entity on the - request in order to compare to the ETag maintained by the service - for the purpose of optimistic concurrency. The merge operation - will be performed only if the ETag sent by the client matches the - value maintained by the server, indicating that the entity has - not been modified since it was retrieved by the client. To force - an unconditional merge, set If-Match to the wildcard character (*). - ''' - - request = _merge_entity(entity, if_match, self._require_encryption, - self._key_encryption_key) - self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request) - - def delete_entity(self, partition_key, row_key, - if_match='*'): - ''' - Adds a delete entity operation to the batch. See - :func:`~azure.storage.table.tableservice.TableService.delete_entity` for more - information on deletes. - - The operation will not be executed until the batch is committed. - - :param str partition_key: - The PartitionKey of the entity. - :param str row_key: - The RowKey of the entity. - :param str if_match: - The client may specify the ETag for the entity on the - request in order to compare to the ETag maintained by the service - for the purpose of optimistic concurrency. The delete operation - will be performed only if the ETag sent by the client matches the - value maintained by the server, indicating that the entity has - not been modified since it was retrieved by the client. To force - an unconditional delete, set If-Match to the wildcard character (*). 
- ''' - request = _delete_entity(partition_key, row_key, if_match) - self._add_to_batch(partition_key, row_key, request) - - def insert_or_replace_entity(self, entity): - ''' - Adds an insert or replace entity operation to the batch. See - :func:`~azure.storage.table.tableservice.TableService.insert_or_replace_entity` for more - information on insert or replace operations. - - The operation will not be executed until the batch is committed. - - :param entity: - The entity to insert or replace. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. - :type entity: a dict or :class:`azure.storage.table.models.Entity` - ''' - request = _insert_or_replace_entity(entity, self._require_encryption, self._key_encryption_key, - self._encryption_resolver) - self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request) - - def insert_or_merge_entity(self, entity): - ''' - Adds an insert or merge entity operation to the batch. See - :func:`~azure.storage.table.tableservice.TableService.insert_or_merge_entity` for more - information on insert or merge operations. - - The operation will not be executed until the batch is committed. - - :param entity: - The entity to insert or merge. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. - :type entity: a dict or :class:`azure.storage.table.models.Entity` - ''' - - request = _insert_or_merge_entity(entity, self._require_encryption, - self._key_encryption_key) - self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request) - - def _add_to_batch(self, partition_key, row_key, request): - ''' - Validates batch-specific rules. - - :param str partition_key: - PartitionKey of the entity. - :param str row_key: - RowKey of the entity. - :param request: - the request to insert, update or delete entity - ''' - # All same partition keys - if self._partition_key: - if self._partition_key != partition_key: - raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH) - else: - self._partition_key = partition_key - - # All different row keys - if row_key in self._row_keys: - raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH) - else: - self._row_keys.append(row_key) - - # 100 entities - if len(self._requests) >= 100: - raise AzureBatchValidationError(_ERROR_TOO_MANY_ENTITIES_IN_BATCH) - - # Add the request to the batch - self._requests.append((row_key, request)) \ No newline at end of file diff --git a/azure/multiapi/storage/v2016_05_31/table/tableservice.py b/azure/multiapi/storage/v2016_05_31/table/tableservice.py deleted file mode 100644 index 3bba921..0000000 --- a/azure/multiapi/storage/v2016_05_31/table/tableservice.py +++ /dev/null @@ -1,1093 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
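The ``_add_to_batch`` checks above enforce the rules stated in the ``TableBatch`` docstring: one partition key per batch, unique row keys, and at most 100 operations. A minimal sketch of that validation (entity values are placeholders); the error is raised client-side, before anything is sent to the service::

    from azure.multiapi.storage.v2016_05_31.table.tablebatch import TableBatch
    from azure.multiapi.storage.v2016_05_31.table.models import AzureBatchValidationError

    batch = TableBatch()
    batch.insert_entity({'PartitionKey': 'batch1', 'RowKey': '1', 'value': 1})
    batch.insert_entity({'PartitionKey': 'batch1', 'RowKey': '2', 'value': 2})

    try:
        # A different PartitionKey (or a duplicate RowKey, or a 101st
        # operation) is rejected locally.
        batch.insert_entity({'PartitionKey': 'batch2', 'RowKey': '3', 'value': 3})
    except AzureBatchValidationError:
        pass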
-#-------------------------------------------------------------------------- -from contextlib import contextmanager -from azure.common import ( - AzureHttpError, -) -from .._common_conversion import ( - _int_to_str, - _to_str, -) -from .._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _ERROR_STORAGE_MISSING_INFO, - _validate_access_policies, -) -from .._serialization import ( - _get_request_body, - _update_request, - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, -) -from .._http import HTTPRequest -from ..models import ( - Services, - ListGenerator, - _OperationContext, -) -from .models import TablePayloadFormat -from .._auth import ( - _StorageSASAuthentication, - _StorageTableSharedKeyAuthentication, -) -from .._connection import _ServiceParameters -from .._deserialization import ( - _convert_xml_to_service_properties, - _convert_xml_to_signed_identifiers, - _convert_xml_to_service_stats, -) -from ._serialization import ( - _convert_table_to_json, - _convert_batch_to_json, - _update_storage_table_header, - _get_entity_path, - _DEFAULT_ACCEPT_HEADER, - _DEFAULT_CONTENT_TYPE_HEADER, - _DEFAULT_PREFER_HEADER, -) -from ._deserialization import ( - _convert_json_response_to_entity, - _convert_json_response_to_tables, - _convert_json_response_to_entities, - _parse_batch_response, - _extract_etag, -) -from .._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ._request import ( - _get_entity, - _insert_entity, - _update_entity, - _merge_entity, - _delete_entity, - _insert_or_replace_entity, - _insert_or_merge_entity, -) -from ..sharedaccesssignature import ( - SharedAccessSignature, -) -from ..storageclient import StorageClient -from .tablebatch import TableBatch - -class TableService(StorageClient): - - ''' - This is the main class managing Azure Table resources. - - The Azure Table service offers structured storage in the form of tables. Tables - store data as collections of entities. Entities are similar to rows. An entity - has a primary key and a set of properties. A property is a name, typed-value pair, - similar to a column. The Table service does not enforce any schema for tables, - so two entities in the same table may have different sets of properties. Developers - may choose to enforce a schema on the client side. A table may contain any number - of entities. - - :ivar object key_encryption_key: - The key-encryption-key optionally provided by the user. If provided, will be used to - encrypt/decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR the resolver must be provided. - If both are provided, the resolver will take precedence. - Must implement the following methods for APIs requiring encryption: - wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - Must implement the following methods for APIs requiring decryption: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :ivar function key_resolver_function(kid): - A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods. 
- For methods requiring decryption, either the key_encryption_key OR - the resolver must be provided. If both are provided, the resolver will take precedence. - It uses the kid string to return a key-encryption-key implementing the interface defined above. - :ivar function(partition_key, row_key, property_name) encryption_resolver_functions: - A function that takes in an entity's partition key, row key, and property name and returns - a boolean that indicates whether that property should be encrypted. - :ivar bool require_encryption: - A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and - successfully read from the queue are/were encrypted while on the server. If this flag is set, all required - parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver. - ''' - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. 
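For reference, a minimal sketch of constructing the table client being removed here, using the parameters described above (the account name, key and connection string are placeholders)::

    from azure.multiapi.storage.v2016_05_31.table.tableservice import TableService

    # Shared-key authentication (placeholder credentials).
    ts = TableService(account_name='myaccount', account_key='<account-key>')

    # Or, equivalently, from a connection string; this overrides the other
    # parameters except the request session.
    ts = TableService(
        connection_string='DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<account-key>')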
- ''' - service_params = _ServiceParameters.get_service_parameters( - 'table', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(TableService, self).__init__(service_params) - - if self.account_key: - self.authentication = _StorageTableSharedKeyAuthentication( - self.account_name, - self.account_key, - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - self.require_encryption = False - self.key_encryption_key = None - self.key_resolver_function = None - self.encryption_resolver_function = None - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the table service. - Use the returned signature with the sas_token parameter of TableService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.TABLE, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - - def generate_table_shared_access_signature(self, table_name, permission=None, - expiry=None, start=None, id=None, - ip=None, protocol=None, - start_pk=None, start_rk=None, - end_pk=None, end_rk=None): - ''' - Generates a shared access signature for the table. 
- Use the returned signature with the sas_token parameter of TableService. - - :param str table_name: - The name of the table to create a SAS token for. - :param TablePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: date or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: date or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_table_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.models.Protocol` for possible values. - :param str start_pk: - The minimum partition key accessible with this shared access - signature. startpk must accompany startrk. Key values are inclusive. - If omitted, there is no lower bound on the table entities that can - be accessed. - :param str start_rk: - The minimum row key accessible with this shared access signature. - startpk must accompany startrk. Key values are inclusive. If - omitted, there is no lower bound on the table entities that can be - accessed. - :param str end_pk: - The maximum partition key accessible with this shared access - signature. endpk must accompany endrk. Key values are inclusive. If - omitted, there is no upper bound on the table entities that can be - accessed. - :param str end_rk: - The maximum row key accessible with this shared access signature. - endpk must accompany endrk. Key values are inclusive. If omitted, - there is no upper bound on the table entities that can be accessed. - :return: A Shared Access Signature (sas) token. 
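A short sketch of issuing a table-level SAS with the parameters documented above and using it from a second, key-less client (account details and table name are placeholders)::

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2016_05_31.table.tableservice import TableService
    from azure.multiapi.storage.v2016_05_31.table.models import TablePermissions

    ts = TableService(account_name='myaccount', account_key='<account-key>')
    token = ts.generate_table_shared_access_signature(
        'tasks',
        permission=TablePermissions.QUERY,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # The token can then authenticate a client that never sees the account key.
    sas_client = TableService(account_name='myaccount', sas_token=token)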
- :rtype: str - ''' - _validate_not_none('table_name', table_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_table( - table_name, - permission=permission, - expiry=expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - start_pk=start_pk, - start_rk=start_rk, - end_pk=end_pk, - end_rk=end_rk, - ) - - def get_table_service_stats(self, timeout=None): - ''' - Retrieves statistics related to replication for the Table service. It is - only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The table service stats. - :rtype: :class:`~azure.storage.models.ServiceStats` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(primary=False, secondary=True) - request.path = '/' - request.query = { - 'restype': 'service', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_stats) - - def get_table_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's Table service, including - logging, analytics and CORS rules. - - :param int timeout: - The server timeout, expressed in seconds. - :return: The table service properties. - :rtype: :class:`~azure.storage.models.ServiceProperties` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = '/' - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def set_table_service_properties(self, logging=None, hour_metrics=None, - minute_metrics=None, cors=None, timeout=None): - ''' - Sets the properties of a storage account's Table service, including - Azure Storage Analytics. If an element (ex Logging) is left as None, the - existing settings on the service for that functionality are preserved. - For more information on Azure Storage Analytics, see - https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx. - - :param Logging logging: - The logging settings provide request logs. - :param Metrics hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for tables. - :param Metrics minute_metrics: - The minute metrics settings provide request statistics - for each minute for tables. 
- :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. For detailed information - about CORS rules and evaluation logic, see - https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx. - :type cors: list of :class:`~azure.storage.models.CorsRule` - :param int timeout: - The server timeout, expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = '/' - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors)) - - self._perform_request(request) - - def list_tables(self, num_results=None, marker=None, timeout=None): - ''' - Returns a generator to list the tables. The generator will lazily follow - the continuation tokens returned by the service and stop when all tables - have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - tables, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param int num_results: - The maximum number of tables to return. - :param marker: - An opaque continuation object. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :type marker: obj - :param int timeout: - The server timeout, expressed in seconds. This function may make multiple - calls to the service in which case the timeout value specified will be - applied to each individual call. - :return: A generator which produces :class:`~azure.storage.models.table.Table` objects. - :rtype: :class:`~azure.storage.models.ListGenerator`: - ''' - operation_context = _OperationContext(location_lock=True) - kwargs = {'max_results': num_results, 'marker': marker, 'timeout': timeout, - '_context': operation_context} - resp = self._list_tables(**kwargs) - - return ListGenerator(resp, self._list_tables, (), kwargs) - - def _list_tables(self, max_results=None, marker=None, timeout=None, _context=None): - ''' - Returns a list of tables under the specified account. Makes a single list - request to the service. Used internally by the list_tables method. - - :param int max_results: - The maximum number of tables to return. A single list request may - return up to 1000 tables and potentially a continuation token which - should be followed to get additional resutls. - :param marker: - A dictionary which identifies the portion of the query to be - returned with the next query operation. The operation returns a - next_marker element within the response body if the list returned - was not complete. This value may then be used as a query parameter - in a subsequent call to request the next portion of the list of - tables. The marker value is opaque to the client. - :type marker: obj - :param int timeout: - The server timeout, expressed in seconds. - :return: A list of tables, potentially with a next_marker property. 
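A sketch of the generator behaviour described above: ``list_tables`` follows continuation tokens lazily, and ``next_marker`` can seed a fresh generator when ``num_results`` is set (credentials are placeholders)::

    from azure.multiapi.storage.v2016_05_31.table.tableservice import TableService

    ts = TableService(account_name='myaccount', account_key='<account-key>')

    first_page = ts.list_tables(num_results=5)
    for table in first_page:
        print(table.name)

    # If more than num_results tables exist, resume from where the first
    # generator stopped.
    next_page = ts.list_tables(num_results=5, marker=first_page.next_marker)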
- :rtype: list of :class:`~azure.storage.models.table.Table`: - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = '/Tables' - request.headers = {'Accept': TablePayloadFormat.JSON_NO_METADATA} - request.query = { - '$top': _int_to_str(max_results), - 'NextTableName': _to_str(marker), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_json_response_to_tables, - operation_context=_context) - - def create_table(self, table_name, fail_on_exist=False, timeout=None): - ''' - Creates a new table in the storage account. - - :param str table_name: - The name of the table to create. The table name may contain only - alphanumeric characters and cannot begin with a numeric character. - It is case-insensitive and must be from 3 to 63 characters long. - :param bool fail_on_exist: - Specifies whether to throw an exception if the table already exists. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A boolean indicating whether the table was created. If fail_on_exist - was set to True, this will throw instead of returning false. - :rtype: bool - ''' - _validate_not_none('table', table_name) - request = HTTPRequest() - request.method = 'POST' - request.host_locations = self._get_host_locations() - request.path = '/Tables' - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1], - _DEFAULT_PREFER_HEADER[0]: _DEFAULT_PREFER_HEADER[1], - _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1] - } - request.body = _get_request_body(_convert_table_to_json(table_name)) - - if not fail_on_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def exists(self, table_name, timeout=None): - ''' - Returns a boolean indicating whether the table exists. - - :param str table_name: - The name of table to check for existence. - :param int timeout: - The server timeout, expressed in seconds. - :return: A boolean indicating whether the table exists. - :rtype: bool - ''' - _validate_not_none('table_name', table_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = '/Tables' + "('" + table_name + "')" - request.headers = {'Accept': TablePayloadFormat.JSON_NO_METADATA} - request.query = {'timeout': _int_to_str(timeout)} - - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def delete_table(self, table_name, fail_not_exist=False, timeout=None): - ''' - Deletes the specified table and any data it contains. - - When a table is successfully deleted, it is immediately marked for deletion - and is no longer accessible to clients. The table is later removed from - the Table service during garbage collection. - - Note that deleting a table is likely to take at least 40 seconds to complete. - If an operation is attempted against the table while it was being deleted, - an :class:`AzureConflictHttpError` will be thrown. - - :param str table_name: - The name of the table to delete. - :param bool fail_not_exist: - Specifies whether to throw an exception if the table doesn't exist. - :param int timeout: - The server timeout, expressed in seconds. 
- :return: - A boolean indicating whether the table was deleted. If fail_not_exist - was set to True, this will throw instead of returning false. - :rtype: bool - ''' - _validate_not_none('table_name', table_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = '/Tables(\'' + _to_str(table_name) + '\')' - request.query = {'timeout': _int_to_str(timeout)} - request.headers = {_DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1]} - - if not fail_not_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_table_acl(self, table_name, timeout=None): - ''' - Returns details about any stored access policies specified on the - table that may be used with Shared Access Signatures. - - :param str table_name: - The name of an existing table. - :param int timeout: - The server timeout, expressed in seconds. - :return: A dictionary of access policies associated with the table. - :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`: - ''' - _validate_not_none('table_name', table_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = '/' + _to_str(table_name) - request.query = { - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_signed_identifiers) - - def set_table_acl(self, table_name, signed_identifiers=None, timeout=None): - ''' - Sets stored access policies for the table that may be used with Shared - Access Signatures. - - When you set permissions for a table, the existing permissions are replaced. - To update the table's permissions, call :func:`~get_table_acl` to fetch - all access policies associated with the table, modify the access policy - that you wish to change, and then call this function with the complete - set of data to perform the update. - - When you establish a stored access policy on a table, it may take up to - 30 seconds to take effect. During this interval, a shared access signature - that is associated with the stored access policy will throw an - :class:`AzureHttpError` until the access policy becomes active. - - :param str table_name: - The name of an existing table. - :param signed_identifiers: - A dictionary of access policies to associate with the table. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy` - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('table_name', table_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = '/' + _to_str(table_name) - request.query = { - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - - self._perform_request(request) - - def query_entities(self, table_name, filter=None, select=None, num_results=None, - marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA, - property_resolver=None, timeout=None): - ''' - Returns a generator to list the entities in the table specified. 
The - generator will lazily follow the continuation tokens returned by the - service and stop when all entities have been returned or max_results is - reached. - - If max_results is specified and the account has more than that number of - entities, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str table_name: - The name of the table to query. - :param str filter: - Returns only entities that satisfy the specified filter. Note that - no more than 15 discrete comparisons are permitted within a $filter - string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx - for more information on constructing filters. - :param str select: - Returns only the desired properties of an entity from the set. - :param int num_results: - The maximum number of entities to return. - :param marker: - An opaque continuation object. This value can be retrieved from the - next_marker field of a previous generator object if max_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :type marker: obj - :param str accept: - Specifies the accepted content type of the response payload. See - :class:`~azure.storage.table.models.TablePayloadFormat` for possible - values. - :param property_resolver: - A function which given the partition key, row key, property name, - property value, and the property EdmType if returned by the service, - returns the EdmType of the property. Generally used if accept is set - to JSON_NO_METADATA. - :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) - :param int timeout: - The server timeout, expressed in seconds. This function may make multiple - calls to the service in which case the timeout value specified will be - applied to each individual call. - :return: A generator which produces :class:`~azure.storage.table.models.Entity` objects. - :rtype: :class:`~azure.storage.models.ListGenerator` - ''' - - operation_context = _OperationContext(location_lock=True) - if self.key_encryption_key is not None or self.key_resolver_function is not None: - # If query already requests all properties, no need to add the metadata columns - if select is not None and select != '*': - select += ',_ClientEncryptionMetadata1,_ClientEncryptionMetadata2' - - args = (table_name,) - kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker, - 'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout, - '_context': operation_context} - resp = self._query_entities(*args, **kwargs) - - return ListGenerator(resp, self._query_entities, args, kwargs) - - def _query_entities(self, table_name, filter=None, select=None, max_results=None, - marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA, - property_resolver=None, timeout=None, _context=None): - ''' - Returns a list of entities under the specified table. Makes a single list - request to the service. Used internally by the query_entities method. - - :param str table_name: - The name of the table to query. - :param str filter: - Returns only entities that satisfy the specified filter. Note that - no more than 15 discrete comparisons are permitted within a $filter - string. 
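A minimal sketch of querying with the parameters described above; the ``filter`` and ``select`` strings follow the OData syntax linked in the docstring, and the table and property names are placeholders::

    from azure.multiapi.storage.v2016_05_31.table.tableservice import TableService

    ts = TableService(account_name='myaccount', account_key='<account-key>')

    tasks = ts.query_entities(
        'tasks',
        filter="PartitionKey eq 'batch1' and priority le 2",
        select='RowKey,description,priority',
        num_results=100,
    )
    for task in tasks:
        print(task.RowKey, task.description)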
See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx - for more information on constructing filters. - :param str select: - Returns only the desired properties of an entity from the set. - :param int top: - The maximum number of entities to return. - :param marker: - A dictionary which identifies the portion of the query to be - returned with the next query operation. The operation returns a - next_marker element within the response body if the list returned - was not complete. This value may then be used as a query parameter - in a subsequent call to request the next portion of the list of - table. The marker value is opaque to the client. - :type marker: obj - :param str accept: - Specifies the accepted content type of the response payload. See - :class:`~azure.storage.table.models.TablePayloadFormat` for possible - values. - :param property_resolver: - A function which given the partition key, row key, property name, - property value, and the property EdmType if returned by the service, - returns the EdmType of the property. Generally used if accept is set - to JSON_NO_METADATA. - :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) - :param int timeout: - The server timeout, expressed in seconds. - :return: A list of entities, potentially with a next_marker property. - :rtype: list of :class:`~azure.storage.table.models.Entity` - ''' - _validate_not_none('table_name', table_name) - _validate_not_none('accept', accept) - next_partition_key = None if marker is None else marker.get('nextpartitionkey') - next_row_key = None if marker is None else marker.get('nextrowkey') - - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = '/' + _to_str(table_name) + '()' - request.headers = {'Accept': _to_str(accept)} - request.query = { - '$filter': _to_str(filter), - '$select': _to_str(select), - '$top': _int_to_str(max_results), - 'NextPartitionKey': _to_str(next_partition_key), - 'NextRowKey': _to_str(next_row_key), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_json_response_to_entities, - [property_resolver, self.require_encryption, - self.key_encryption_key, self.key_resolver_function], - operation_context=_context) - - def commit_batch(self, table_name, batch, timeout=None): - ''' - Commits a :class:`~azure.storage.table.TableBatch` request. - - :param str table_name: - The name of the table to commit the batch to. - :param TableBatch batch: - The batch to commit. - :param int timeout: - The server timeout, expressed in seconds. - :return: A list of the batch responses corresponding to the requests in the batch. 
- :rtype: list of response objects - ''' - _validate_not_none('table_name', table_name) - - # Construct the batch request - request = HTTPRequest() - request.method = 'POST' - request.host_locations = self._get_host_locations() - request.path = '/' + '$batch' - request.query = {'timeout': _int_to_str(timeout)} - - # Update the batch operation requests with table and client specific info - for row_key, batch_request in batch._requests: - if batch_request.method == 'POST': - batch_request.path = '/' + _to_str(table_name) - else: - batch_request.path = _get_entity_path(table_name, batch._partition_key, row_key) - _update_request(batch_request) - - # Construct the batch body - request.body, boundary = _convert_batch_to_json(batch._requests) - request.headers = {'Content-Type': boundary} - - # Perform the batch request and return the response - return self._perform_request(request, _parse_batch_response) - - @contextmanager - def batch(self, table_name, timeout=None): - ''' - Creates a batch object which can be used as a context manager. Commits the batch on exit. - - :param str table_name: - The name of the table to commit the batch to. - :param int timeout: - The server timeout, expressed in seconds. - ''' - batch = TableBatch(self.require_encryption, self.key_encryption_key, self.encryption_resolver_function) - yield batch - self.commit_batch(table_name, batch, timeout=timeout) - - def get_entity(self, table_name, partition_key, row_key, select=None, - accept=TablePayloadFormat.JSON_MINIMAL_METADATA, - property_resolver=None, timeout=None): - ''' - Get an entity from the specified table. Throws if the entity does not exist. - - :param str table_name: - The name of the table to get the entity from. - :param str partition_key: - The PartitionKey of the entity. - :param str row_key: - The RowKey of the entity. - :param str select: - Returns only the desired properties of an entity from the set. - :param str accept: - Specifies the accepted content type of the response payload. See - :class:`~azure.storage.table.models.TablePayloadFormat` for possible - values. - :param property_resolver: - A function which given the partition key, row key, property name, - property value, and the property EdmType if returned by the service, - returns the EdmType of the property. Generally used if accept is set - to JSON_NO_METADATA. - :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type) - :param int timeout: - The server timeout, expressed in seconds. - :return: The retrieved entity. - :rtype: :class:`~azure.storage.table.models.Entity` - ''' - _validate_not_none('table_name', table_name) - request = _get_entity(partition_key, row_key, select, accept) - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_entity_path(table_name, partition_key, row_key) - request.query['timeout'] = _int_to_str(timeout) - - return self._perform_request(request, _convert_json_response_to_entity, - [property_resolver, self.require_encryption, - self.key_encryption_key, self.key_resolver_function]) - - def insert_entity(self, table_name, entity, timeout=None): - ''' - Inserts a new entity into the table. Throws if an entity with the same - PartitionKey and RowKey already exists. - - When inserting an entity into a table, you must specify values for the - PartitionKey and RowKey system properties. Together, these properties - form the primary key and must be unique within the table. 
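The ``batch`` context manager above simply wraps ``commit_batch``; a sketch of the typical pattern (entities are placeholders and share one PartitionKey, as the batch rules require)::

    from azure.multiapi.storage.v2016_05_31.table.tableservice import TableService

    ts = TableService(account_name='myaccount', account_key='<account-key>')

    # Commits automatically on exit from the with-block.
    with ts.batch('tasks') as batch:
        for i in range(3):
            batch.insert_entity({
                'PartitionKey': 'batch1',
                'RowKey': str(i),
                'description': 'task %d' % i,
            })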
Both the - PartitionKey and RowKey values must be string values; each key value may - be up to 64 KB in size. If you are using an integer value for the key - value, you should convert the integer to a fixed-width string, because - they are canonically sorted. For example, you should convert the value - 1 to 0000001 to ensure proper sorting. - - :param str table_name: - The name of the table to insert the entity into. - :param entity: - The entity to insert. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. - :type entity: a dict or :class:`~azure.storage.table.models.Entity` - :param int timeout: - The server timeout, expressed in seconds. - :return: The etag of the inserted entity. - :rtype: str - ''' - _validate_not_none('table_name', table_name) - - request = _insert_entity(entity, self.require_encryption, self.key_encryption_key, - self.encryption_resolver_function) - request.host_locations = self._get_host_locations() - request.path = '/' + _to_str(table_name) - request.query['timeout'] = _int_to_str(timeout) - - return self._perform_request(request, _extract_etag) - - def update_entity(self, table_name, entity, if_match='*', timeout=None): - ''' - Updates an existing entity in a table. Throws if the entity does not exist. - The update_entity operation replaces the entire entity and can be used to - remove properties. - - :param str table_name: - The name of the table containing the entity to update. - :param entity: - The entity to update. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. - :type entity: a dict or :class:`~azure.storage.table.models.Entity` - :param str if_match: - The client may specify the ETag for the entity on the - request in order to compare to the ETag maintained by the service - for the purpose of optimistic concurrency. The update operation - will be performed only if the ETag sent by the client matches the - value maintained by the server, indicating that the entity has - not been modified since it was retrieved by the client. To force - an unconditional update, set If-Match to the wildcard character (*). - :param int timeout: - The server timeout, expressed in seconds. - :return: The etag of the entity. - :rtype: str - ''' - _validate_not_none('table_name', table_name) - request = _update_entity(entity, if_match, self.require_encryption, self.key_encryption_key, - self.encryption_resolver_function) - request.host_locations = self._get_host_locations() - request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) - request.query['timeout'] = _int_to_str(timeout) - - return self._perform_request(request, _extract_etag) - - def merge_entity(self, table_name, entity, if_match='*', timeout=None): - ''' - Updates an existing entity by merging the entity's properties. Throws - if the entity does not exist. - - This operation does not replace the existing entity as the update_entity - operation does. A property cannot be removed with merge_entity. - - Any properties with null values are ignored. All other properties will be - updated or added. - - :param str table_name: - The name of the table containing the entity to merge. - :param entity: - The entity to merge. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. 
- :type entity: a dict or :class:`~azure.storage.table.models.Entity` - :param str if_match: - The client may specify the ETag for the entity on the - request in order to compare to the ETag maintained by the service - for the purpose of optimistic concurrency. The merge operation - will be performed only if the ETag sent by the client matches the - value maintained by the server, indicating that the entity has - not been modified since it was retrieved by the client. To force - an unconditional merge, set If-Match to the wildcard character (*). - :param int timeout: - The server timeout, expressed in seconds. - :return: The etag of the entity. - :rtype: str - ''' - - _validate_not_none('table_name', table_name) - - request = _merge_entity(entity, if_match, self.require_encryption, - self.key_encryption_key) - request.host_locations = self._get_host_locations() - request.query['timeout'] = _int_to_str(timeout) - request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) - - return self._perform_request(request, _extract_etag) - - def delete_entity(self, table_name, partition_key, row_key, - if_match='*', timeout=None): - ''' - Deletes an existing entity in a table. Throws if the entity does not exist. - - When an entity is successfully deleted, the entity is immediately marked - for deletion and is no longer accessible to clients. The entity is later - removed from the Table service during garbage collection. - - :param str table_name: - The name of the table containing the entity to delete. - :param str partition_key: - The PartitionKey of the entity. - :param str row_key: - The RowKey of the entity. - :param str if_match: - The client may specify the ETag for the entity on the - request in order to compare to the ETag maintained by the service - for the purpose of optimistic concurrency. The delete operation - will be performed only if the ETag sent by the client matches the - value maintained by the server, indicating that the entity has - not been modified since it was retrieved by the client. To force - an unconditional delete, set If-Match to the wildcard character (*). - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('table_name', table_name) - request = _delete_entity(partition_key, row_key, if_match) - request.host_locations = self._get_host_locations() - request.query['timeout'] = _int_to_str(timeout) - request.path = _get_entity_path(table_name, partition_key, row_key) - - self._perform_request(request) - - def insert_or_replace_entity(self, table_name, entity, timeout=None): - ''' - Replaces an existing entity or inserts a new entity if it does not - exist in the table. Because this operation can insert or update an - entity, it is also known as an "upsert" operation. - - If insert_or_replace_entity is used to replace an entity, any properties - from the previous entity will be removed if the new entity does not define - them. - - :param str table_name: - The name of the table in which to insert or replace the entity. - :param entity: - The entity to insert or replace. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. - :type entity: a dict or :class:`~azure.storage.table.models.Entity` - :param int timeout: - The server timeout, expressed in seconds. - :return: The etag of the entity. 
- :rtype: str - ''' - _validate_not_none('table_name', table_name) - request = _insert_or_replace_entity(entity, self.require_encryption, self.key_encryption_key, - self.encryption_resolver_function) - request.host_locations = self._get_host_locations() - request.query['timeout'] = _int_to_str(timeout) - request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) - - return self._perform_request(request, _extract_etag) - - def insert_or_merge_entity(self, table_name, entity, timeout=None): - ''' - Merges an existing entity or inserts a new entity if it does not exist - in the table. - - If insert_or_merge_entity is used to merge an entity, any properties from - the previous entity will be retained if the request does not define or - include them. - - :param str table_name: - The name of the table in which to insert or merge the entity. - :param entity: - The entity to insert or merge. Could be a dict or an entity object. - Must contain a PartitionKey and a RowKey. - :type entity: a dict or :class:`~azure.storage.table.models.Entity` - :param int timeout: - The server timeout, expressed in seconds. - :return: The etag of the entity. - :rtype: str - ''' - - _validate_not_none('table_name', table_name) - request = _insert_or_merge_entity(entity, self.require_encryption, - self.key_encryption_key) - request.host_locations = self._get_host_locations() - request.query['timeout'] = _int_to_str(timeout) - request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) - - return self._perform_request(request, _extract_etag) - - def _perform_request(self, request, parser=None, parser_args=None, operation_context=None): - _update_storage_table_header(request) - return super(TableService, self)._perform_request(request, parser, parser_args, operation_context) \ No newline at end of file diff --git a/azure/multiapi/storage/v2017_07_29/__init__.py b/azure/multiapi/storage/v2017_07_29/__init__.py deleted file mode 100644 index 5b0f8ae..0000000 --- a/azure/multiapi/storage/v2017_07_29/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- diff --git a/azure/multiapi/storage/v2017_07_29/blob/__init__.py b/azure/multiapi/storage/v2017_07_29/blob/__init__.py deleted file mode 100644 index eb3e5d0..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from .appendblobservice import AppendBlobService -from .blockblobservice import BlockBlobService -from .models import ( - Container, - ContainerProperties, - Blob, - BlobProperties, - BlobBlock, - BlobBlockList, - PageRange, - ContentSettings, - CopyProperties, - ContainerPermissions, - BlobPermissions, - _LeaseActions, - AppendBlockProperties, - PageBlobProperties, - ResourceProperties, - Include, - SequenceNumberAction, - BlockListType, - PublicAccess, - BlobPrefix, - DeleteSnapshot, -) -from .pageblobservice import PageBlobService diff --git a/azure/multiapi/storage/v2017_07_29/blob/_constants.py b/azure/multiapi/storage/v2017_07_29/blob/_constants.py deleted file mode 100644 index 5c50469..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/_constants.py +++ /dev/null @@ -1,14 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -__author__ = 'Microsoft Corp. ' -__version__ = '1.1.0' - -# x-ms-version for storage service. -X_MS_VERSION = '2017-07-29' - -# internal configurations, should not be changed -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 diff --git a/azure/multiapi/storage/v2017_07_29/blob/_deserialization.py b/azure/multiapi/storage/v2017_07_29/blob/_deserialization.py deleted file mode 100644 index a2f7b08..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/_deserialization.py +++ /dev/null @@ -1,436 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from azure.common import AzureException -from dateutil import parser - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from ..common._common_conversion import ( - _decode_base64_to_text, - _to_str, - _get_content_md5 -) -from ..common._deserialization import ( - _parse_properties, - _to_int, - _parse_metadata, - _convert_xml_to_signed_identifiers, - _bool, -) -from .models import ( - Container, - Blob, - BlobBlock, - BlobBlockList, - BlobBlockState, - BlobProperties, - PageRange, - ContainerProperties, - AppendBlockProperties, - PageBlobProperties, - ResourceProperties, - BlobPrefix, -) -from ._encryption import _decrypt_blob -from ..common.models import _list -from ..common._error import ( - _validate_content_match, - _ERROR_DECRYPTION_FAILURE, -) - - -def _parse_base_properties(response): - ''' - Extracts basic response headers. - ''' - resource_properties = ResourceProperties() - resource_properties.last_modified = parser.parse(response.headers.get('last-modified')) - resource_properties.etag = response.headers.get('etag') - - return resource_properties - - -def _parse_page_properties(response): - ''' - Extracts page response headers. 
- ''' - put_page = PageBlobProperties() - put_page.last_modified = parser.parse(response.headers.get('last-modified')) - put_page.etag = response.headers.get('etag') - put_page.sequence_number = _to_int(response.headers.get('x-ms-blob-sequence-number')) - - return put_page - - -def _parse_append_block(response): - ''' - Extracts append block response headers. - ''' - append_block = AppendBlockProperties() - append_block.last_modified = parser.parse(response.headers.get('last-modified')) - append_block.etag = response.headers.get('etag') - append_block.append_offset = _to_int(response.headers.get('x-ms-blob-append-offset')) - append_block.committed_block_count = _to_int(response.headers.get('x-ms-blob-committed-block-count')) - - return append_block - - -def _parse_snapshot_blob(response, name): - ''' - Extracts snapshot return header. - ''' - snapshot = response.headers.get('x-ms-snapshot') - - return _parse_blob(response, name, snapshot) - - -def _parse_lease(response): - ''' - Extracts lease time and ID return headers. - ''' - lease = {'time': response.headers.get('x-ms-lease-time')} - if lease['time']: - lease['time'] = _to_int(lease['time']) - - lease['id'] = response.headers.get('x-ms-lease-id') - - return lease - - -def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False, - key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, BlobProperties) - - # For range gets, only look at 'x-ms-blob-content-md5' for overall MD5 - content_settings = getattr(props, 'content_settings') - if 'content-range' in response.headers: - if 'x-ms-blob-content-md5' in response.headers: - setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5'])) - else: - delattr(content_settings, 'content_md5') - - if validate_content: - computed_md5 = _get_content_md5(response.body) - _validate_content_match(response.headers['content-md5'], computed_md5) - - if key_encryption_key is not None or key_resolver_function is not None: - try: - response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function, - response, start_offset, end_offset) - except: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - - return Blob(name, snapshot, response.body, props, metadata) - - -def _parse_container(response, name): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, ContainerProperties) - return Container(name, props, metadata) - - -def _convert_xml_to_signed_identifiers_and_access(response): - acl = _convert_xml_to_signed_identifiers(response) - acl.public_access = response.headers.get('x-ms-blob-public-access') - - return acl - - -def _convert_xml_to_containers(response): - ''' - - - string-value - string-value - int-value - - - container-name - - date/time-value - etag - locked | unlocked - available | leased | expired | breaking | broken - infinite | fixed - blob | container - - - value - - - - marker-value - - ''' - if response is None or response.body is None: - return None - - containers = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - setattr(containers, 'next_marker', list_element.findtext('NextMarker')) - - containers_element = list_element.find('Containers') - - for container_element in containers_element.findall('Container'): - # Name element - container = Container() - 
container.name = container_element.findtext('Name') - - # Metadata - metadata_root_element = container_element.find('Metadata') - if metadata_root_element is not None: - container.metadata = dict() - for metadata_element in metadata_root_element: - container.metadata[metadata_element.tag] = metadata_element.text - - # Properties - properties_element = container_element.find('Properties') - container.properties.etag = properties_element.findtext('Etag') - container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified')) - container.properties.lease_status = properties_element.findtext('LeaseStatus') - container.properties.lease_state = properties_element.findtext('LeaseState') - container.properties.lease_duration = properties_element.findtext('LeaseDuration') - container.properties.public_access = properties_element.findtext('PublicAccess') - - # Add container to list - containers.append(container) - - return containers - - -LIST_BLOBS_ATTRIBUTE_MAP = { - 'Last-Modified': (None, 'last_modified', parser.parse), - 'Etag': (None, 'etag', _to_str), - 'x-ms-blob-sequence-number': (None, 'sequence_number', _to_int), - 'BlobType': (None, 'blob_type', _to_str), - 'Content-Length': (None, 'content_length', _to_int), - 'ServerEncrypted': (None, 'server_encrypted', _bool), - 'Content-Type': ('content_settings', 'content_type', _to_str), - 'Content-Encoding': ('content_settings', 'content_encoding', _to_str), - 'Content-Disposition': ('content_settings', 'content_disposition', _to_str), - 'Content-Language': ('content_settings', 'content_language', _to_str), - 'Content-MD5': ('content_settings', 'content_md5', _to_str), - 'Cache-Control': ('content_settings', 'cache_control', _to_str), - 'LeaseStatus': ('lease', 'status', _to_str), - 'LeaseState': ('lease', 'state', _to_str), - 'LeaseDuration': ('lease', 'duration', _to_str), - 'CopyId': ('copy', 'id', _to_str), - 'CopySource': ('copy', 'source', _to_str), - 'CopyStatus': ('copy', 'status', _to_str), - 'CopyProgress': ('copy', 'progress', _to_str), - 'CopyCompletionTime': ('copy', 'completion_time', _to_str), - 'CopyStatusDescription': ('copy', 'status_description', _to_str), - 'AccessTier': (None, 'blob_tier', _to_str), - 'AccessTierChangeTime': (None, 'blob_tier_change_time', parser.parse), - 'AccessTierInferred': (None, 'blob_tier_inferred', _bool), - 'ArchiveStatus': (None, 'rehydration_status', _to_str), - 'DeletedTime': (None, 'deleted_time', parser.parse), - 'RemainingRetentionDays': (None, 'remaining_retention_days', _to_int), -} - - -def _convert_xml_to_blob_list(response): - ''' - - - string-value - string-value - int-value - string-value - - - blob-name - true - date-time-value - - date-time-value - etag - size-in-bytes - blob-content-type - - - - - sequence-number - BlockBlob|PageBlob|AppendBlob - locked|unlocked - available | leased | expired | breaking | broken - infinite | fixed - id - pending | success | aborted | failed - source url - bytes copied/bytes total - datetime - error string - P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot - date-time-value - true - datetime - int - - - value - - - - blob-prefix - - - - - ''' - if response is None or response.body is None: - return None - - blob_list = _list() - list_element = ETree.fromstring(response.body) - - setattr(blob_list, 'next_marker', list_element.findtext('NextMarker')) - - blobs_element = list_element.find('Blobs') - blob_prefix_elements = blobs_element.findall('BlobPrefix') - if blob_prefix_elements is not None: - for 
blob_prefix_element in blob_prefix_elements: - prefix = BlobPrefix() - prefix.name = blob_prefix_element.findtext('Name') - blob_list.append(prefix) - - for blob_element in blobs_element.findall('Blob'): - blob = Blob() - blob.name = blob_element.findtext('Name') - blob.snapshot = blob_element.findtext('Snapshot') - - deleted = blob_element.findtext('Deleted') - if deleted: - blob.deleted = _bool(deleted) - - # Properties - properties_element = blob_element.find('Properties') - if properties_element is not None: - for property_element in properties_element: - info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag) - if info is None: - setattr(blob.properties, property_element.tag, _to_str(property_element.text)) - elif info[0] is None: - setattr(blob.properties, info[1], info[2](property_element.text)) - else: - attr = getattr(blob.properties, info[0]) - setattr(attr, info[1], info[2](property_element.text)) - - # Metadata - metadata_root_element = blob_element.find('Metadata') - if metadata_root_element is not None: - blob.metadata = dict() - for metadata_element in metadata_root_element: - blob.metadata[metadata_element.tag] = metadata_element.text - - # Add blob to list - blob_list.append(blob) - - return blob_list - - -def _convert_xml_to_block_list(response): - ''' - - - - - base64-encoded-block-id - size-in-bytes - - - - - base64-encoded-block-id - size-in-bytes - - - - - Converts xml response to block list class. - ''' - if response is None or response.body is None: - return None - - block_list = BlobBlockList() - - list_element = ETree.fromstring(response.body) - - committed_blocks_element = list_element.find('CommittedBlocks') - if committed_blocks_element is not None: - for block_element in committed_blocks_element.findall('Block'): - block_id = _decode_base64_to_text(block_element.findtext('Name', '')) - block_size = int(block_element.findtext('Size')) - block = BlobBlock(id=block_id, state=BlobBlockState.Committed) - block._set_size(block_size) - block_list.committed_blocks.append(block) - - uncommitted_blocks_element = list_element.find('UncommittedBlocks') - if uncommitted_blocks_element is not None: - for block_element in uncommitted_blocks_element.findall('Block'): - block_id = _decode_base64_to_text(block_element.findtext('Name', '')) - block_size = int(block_element.findtext('Size')) - block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted) - block._set_size(block_size) - block_list.uncommitted_blocks.append(block) - - return block_list - - -def _convert_xml_to_page_ranges(response): - ''' - - - - Start Byte - End Byte - - - Start Byte - End Byte - - - Start Byte - End Byte - - - ''' - if response is None or response.body is None: - return None - - page_list = list() - - list_element = ETree.fromstring(response.body) - - for page_range_element in list_element: - if page_range_element.tag == 'PageRange': - is_cleared = False - elif page_range_element.tag == 'ClearRange': - is_cleared = True - else: - pass # ignore any unrecognized Page Range types - - page_list.append( - PageRange( - int(page_range_element.findtext('Start')), - int(page_range_element.findtext('End')), - is_cleared - ) - ) - - return page_list diff --git a/azure/multiapi/storage/v2017_07_29/blob/_download_chunking.py b/azure/multiapi/storage/v2017_07_29/blob/_download_chunking.py deleted file mode 100644 index 067b16d..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/_download_chunking.py +++ /dev/null @@ -1,127 +0,0 @@ -# 
------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import threading - -from ..common._error import _ERROR_NO_SINGLE_THREAD_CHUNKING - - -def _download_blob_chunks(blob_service, container_name, blob_name, snapshot, - download_size, block_size, progress, start_range, end_range, - stream, max_connections, progress_callback, validate_content, - lease_id, if_modified_since, if_unmodified_since, if_match, - if_none_match, timeout, operation_context): - if max_connections <= 1: - raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('blob')) - - downloader = _BlobChunkDownloader( - blob_service, - container_name, - blob_name, - snapshot, - download_size, - block_size, - progress, - start_range, - end_range, - stream, - progress_callback, - validate_content, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout, - operation_context, - ) - - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - result = list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets())) - - -class _BlobChunkDownloader(object): - def __init__(self, blob_service, container_name, blob_name, snapshot, download_size, - chunk_size, progress, start_range, end_range, stream, - progress_callback, validate_content, lease_id, if_modified_since, - if_unmodified_since, if_match, if_none_match, timeout, operation_context): - self.blob_service = blob_service - self.container_name = container_name - self.blob_name = blob_name - self.snapshot = snapshot - self.chunk_size = chunk_size - - self.download_size = download_size - self.start_index = start_range - self.blob_end = end_range - - self.stream = stream - self.stream_start = stream.tell() - self.stream_lock = threading.Lock() - self.progress_callback = progress_callback - self.progress_total = progress - self.progress_lock = threading.Lock() - self.timeout = timeout - self.operation_context = operation_context - - self.validate_content = validate_content - self.lease_id = lease_id - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - - def get_chunk_offsets(self): - index = self.start_index - while index < self.blob_end: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - if chunk_start + self.chunk_size > self.blob_end: - chunk_end = self.blob_end - else: - chunk_end = chunk_start + self.chunk_size - - chunk_data = self._download_chunk(chunk_start, chunk_end).content - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def _update_progress(self, length): - if self.progress_callback is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.download_size) - - def _write_to_stream(self, chunk_data, chunk_start): - with self.stream_lock: - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - response = self.blob_service._get_blob( - self.container_name, - self.blob_name, - snapshot=self.snapshot, - start_range=chunk_start, - 
end_range=chunk_end - 1, - validate_content=self.validate_content, - lease_id=self.lease_id, - if_modified_since=self.if_modified_since, - if_unmodified_since=self.if_unmodified_since, - if_match=self.if_match, - if_none_match=self.if_none_match, - timeout=self.timeout, - _context=self.operation_context - ) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - self.if_match = response.properties.etag - return response diff --git a/azure/multiapi/storage/v2017_07_29/blob/_encryption.py b/azure/multiapi/storage/v2017_07_29/blob/_encryption.py deleted file mode 100644 index f1e9b54..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/_encryption.py +++ /dev/null @@ -1,187 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from json import ( - dumps, - loads, -) -from os import urandom - -from cryptography.hazmat.primitives.padding import PKCS7 - -from ..common._encryption import ( - _generate_encryption_data_dict, - _generate_AES_CBC_cipher, - _dict_to_encryption_data, - _validate_and_unwrap_cek, - _EncryptionAlgorithm, -) -from ..common._error import ( - _validate_not_none, - _validate_key_encryption_key_wrap, - _ERROR_DATA_NOT_ENCRYPTED, - _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM, -) - - -def _encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the _upload_blob_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def _generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def _decrypt_blob(require_encryption, key_encryption_key, key_resolver, - response, start_offset, end_offset): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. 
- :rtype: bytes - ''' - _validate_not_none('response', response) - content = response.body - _validate_not_none('content', content) - - try: - encryption_data = _dict_to_encryption_data(loads(response.headers['x-ms-meta-encryptiondata'])) - except: - if require_encryption: - raise ValueError(_ERROR_DATA_NOT_ENCRYPTED) - else: - return content - - if not (encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) - - blob_type = response.headers['x-ms-blob-type'] - - iv = None - unpad = False - start_range, end_range = 0, len(content) - if 'content-range' in response.headers: - content_range = response.headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - start_range = int(content_range[0]) - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def _get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder diff --git a/azure/multiapi/storage/v2017_07_29/blob/_error.py b/azure/multiapi/storage/v2017_07_29/blob/_error.py deleted file mode 100644 index f24edc8..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/_error.py +++ /dev/null @@ -1,29 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \ - 'Invalid page blob size: {0}. ' + \ - 'The size must be aligned to a 512-byte boundary.' - -_ERROR_PAGE_BLOB_START_ALIGNMENT = \ - 'start_range must align with 512 page size' - -_ERROR_PAGE_BLOB_END_ALIGNMENT = \ - 'end_range must align with 512 page size' - -_ERROR_INVALID_BLOCK_ID = \ - 'All blocks in block list need to have valid block ids.' - -_ERROR_INVALID_LEASE_DURATION = \ - "lease_duration param needs to be between 15 and 60 or -1." - -_ERROR_INVALID_LEASE_BREAK_PERIOD = \ - "lease_break_period param needs to be between 0 and 60." - -_ERROR_NO_SINGLE_THREAD_CHUNKING = \ - 'To use blob chunk downloader more than 1 thread must be ' + \ - 'used since get_blob_to_bytes should be called for single threaded ' + \ - 'blob downloads.' 
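The download-chunking, encryption, and error helpers removed above backed the high-level convenience methods of the v2017_07_29 blob package. As a minimal sketch of how that generation of the SDK was typically driven before this removal (account, container, blob, and file names below are placeholders, not values from this repo; `BlockBlobService` and `get_blob_to_path` are the public entry points exported by the deleted `blob/__init__.py` and `baseblobservice.py`):

    from azure.multiapi.storage.v2017_07_29.blob import BlockBlobService

    # Placeholder credentials; real callers supply their own account name and key.
    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    # max_connections > 1 enables the chunked, parallel download path implemented by
    # the removed _download_blob_chunks helper shown above (single-threaded chunking
    # is rejected there via _ERROR_NO_SINGLE_THREAD_CHUNKING).
    service.get_blob_to_path('mycontainer', 'myblob', 'myblob.bin', max_connections=2)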
diff --git a/azure/multiapi/storage/v2017_07_29/blob/_serialization.py b/azure/multiapi/storage/v2017_07_29/blob/_serialization.py deleted file mode 100644 index 100b408..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/_serialization.py +++ /dev/null @@ -1,118 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from xml.sax.saxutils import escape as xml_escape - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from ..common._common_conversion import ( - _encode_base64, - _str, -) -from ..common._error import ( - _validate_not_none, - _ERROR_START_END_NEEDED_FOR_MD5, - _ERROR_RANGE_TOO_LARGE_FOR_MD5, -) -from ._error import ( - _ERROR_PAGE_BLOB_START_ALIGNMENT, - _ERROR_PAGE_BLOB_END_ALIGNMENT, - _ERROR_INVALID_BLOCK_ID, -) -from io import BytesIO - - -def _get_path(container_name=None, blob_name=None): - ''' - Creates the path to access a blob resource. - - container_name: - Name of container. - blob_name: - The path to the blob. - ''' - if container_name and blob_name: - return '/{0}/{1}'.format( - _str(container_name), - _str(blob_name)) - elif container_name: - return '/{0}'.format(_str(container_name)) - else: - return '/' - - -def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if start_range_required or end_range is not None: - _validate_not_none('start_range', start_range) - if end_range_required: - _validate_not_none('end_range', end_range) - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT) - if end_range is not None and end_range % 512 != 511: - raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT) - - # Format based on whether end_range is present - request.headers = request.headers or {} - if end_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - request.headers['x-ms-range'] = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5) - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5) - - request.headers['x-ms-range-get-content-md5'] = 'true' - - -def _convert_block_list_to_xml(block_id_list): - ''' - - - first-base64-encoded-block-id - second-base64-encoded-block-id - third-base64-encoded-block-id - - - Convert a block list to xml to send. - - block_id_list: - A list of BlobBlock containing the block ids and block state that are used in put_block_list. - Only get block from latest blocks. 
- ''' - if block_id_list is None: - return '' - - block_list_element = ETree.Element('BlockList') - - # Enabled - for block in block_id_list: - if block.id is None: - raise ValueError(_ERROR_INVALID_BLOCK_ID) - id = xml_escape(_str(format(_encode_base64(block.id)))) - ETree.SubElement(block_list_element, block.state).text = id - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - # return xml value - return output diff --git a/azure/multiapi/storage/v2017_07_29/blob/_upload_chunking.py b/azure/multiapi/storage/v2017_07_29/blob/_upload_chunking.py deleted file mode 100644 index 0abf847..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/_upload_chunking.py +++ /dev/null @@ -1,485 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from math import ceil -from threading import Lock - -from ..common._common_conversion import _encode_base64 -from ..common._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM -from ..common._serialization import ( - url_quote, - _get_data_bytes_only, - _len_plus -) -from ._encryption import ( - _get_blob_encryptor_and_padder, -) -from .models import BlobBlock -from ._constants import ( - _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE -) - - -def _upload_blob_chunks(blob_service, container_name, blob_name, - blob_size, block_size, stream, max_connections, - progress_callback, validate_content, lease_id, uploader_class, - maxsize_condition=None, if_match=None, timeout=None, - content_encryption_key=None, initialization_vector=None, resource_properties=None): - encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector, - uploader_class is not _PageBlobChunkUploader) - - uploader = uploader_class( - blob_service, - container_name, - blob_name, - blob_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - lease_id, - timeout, - encryptor, - padder - ) - - uploader.maxsize_condition = maxsize_condition - - # ETag matching does not work with parallelism as a ranged upload may start - # before the previous finishes and provides an etag - uploader.if_match = if_match if not max_connections > 1 else None - - if progress_callback is not None: - progress_callback(0, blob_size) - - if max_connections > 1: - import concurrent.futures - from threading import BoundedSemaphore - - ''' - Ensures we bound the chunking so we only buffer and submit 'max_connections' amount of work items to the executor. - This is necessary as the executor queue will keep accepting submitted work items, which results in buffering all the blocks if - the max_connections + 1 ensures the next chunk is already buffered and ready for when the worker thread is available. - ''' - chunk_throttler = BoundedSemaphore(max_connections + 1) - - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - futures = [] - running_futures = [] - - # Check for exceptions and fail fast. 
- for chunk in uploader.get_chunk_streams(): - for f in running_futures: - if f.done(): - if f.exception(): - raise f.exception() - else: - running_futures.remove(f) - - chunk_throttler.acquire() - future = executor.submit(uploader.process_chunk, chunk) - - # Calls callback upon completion (even if the callback was added after the Future task is done). - future.add_done_callback(lambda x: chunk_throttler.release()) - futures.append(future) - running_futures.append(future) - - # result() will wait until completion and also raise any exceptions that may have been set. - range_ids = [f.result() for f in futures] - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - - if resource_properties: - resource_properties.last_modified = uploader.last_modified - resource_properties.etag = uploader.etag - - return range_ids - - -def _upload_blob_substream_blocks(blob_service, container_name, blob_name, - blob_size, block_size, stream, max_connections, - progress_callback, validate_content, lease_id, uploader_class, - maxsize_condition=None, if_match=None, timeout=None): - uploader = uploader_class( - blob_service, - container_name, - blob_name, - blob_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - lease_id, - timeout, - None, - None - ) - - uploader.maxsize_condition = maxsize_condition - - # ETag matching does not work with parallelism as a ranged upload may start - # before the previous finishes and provides an etag - uploader.if_match = if_match if not max_connections > 1 else None - - if progress_callback is not None: - progress_callback(0, blob_size) - - if max_connections > 1: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks())) - else: - range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()] - - return range_ids - - -class _BlobChunkUploader(object): - def __init__(self, blob_service, container_name, blob_name, blob_size, - chunk_size, stream, parallel, progress_callback, - validate_content, lease_id, timeout, encryptor, padder): - self.blob_service = blob_service - self.container_name = container_name - self.blob_name = blob_name - self.blob_size = blob_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - self.progress_callback = progress_callback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - self.validate_content = validate_content - self.lease_id = lease_id - self.timeout = timeout - self.encryptor = encryptor - self.padder = padder - self.last_modified = None - self.etag = None - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.blob_size: - read_size = min(self.chunk_size - len(data), self.blob_size - (index + len(data))) - temp = self.stream.read(read_size) - temp = _get_data_bytes_only('temp', temp) - data += temp - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. 
- if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if len(data) > 0: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_callback is not None: - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - else: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.blob_size) - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.blob_size - - if blob_length is None: - blob_length = _len_plus(self.stream) - if blob_length is None: - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream')) - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - yield ('BlockId{}'.format("%05d" % i), - _SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size, - lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class _BlockBlobChunkUploader(_BlobChunkUploader): - def _upload_chunk(self, chunk_offset, chunk_data): - block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset))) - self.blob_service._put_block( - self.container_name, - self.blob_name, - chunk_data, - block_id, - validate_content=self.validate_content, - lease_id=self.lease_id, - timeout=self.timeout, - ) - return BlobBlock(block_id) - - def _upload_substream_block(self, block_id, block_stream): - try: - self.blob_service._put_block( - self.container_name, - self.blob_name, - block_stream, - block_id, - validate_content=self.validate_content, - lease_id=self.lease_id, - timeout=self.timeout, - ) - finally: - block_stream.close() - return BlobBlock(block_id) - - -class _PageBlobChunkUploader(_BlobChunkUploader): - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte != 0: - return False - return True - - def _upload_chunk(self, chunk_start, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_start + len(chunk_data) - 1 - resp = self.blob_service._update_page( - self.container_name, - self.blob_name, - chunk_data, - 
chunk_start, - chunk_end, - validate_content=self.validate_content, - lease_id=self.lease_id, - if_match=self.if_match, - timeout=self.timeout, - ) - - if not self.parallel: - self.if_match = resp.etag - - self.set_response_properties(resp) - - -class _AppendBlobChunkUploader(_BlobChunkUploader): - def _upload_chunk(self, chunk_offset, chunk_data): - if not hasattr(self, 'current_length'): - resp = self.blob_service.append_block( - self.container_name, - self.blob_name, - chunk_data, - validate_content=self.validate_content, - lease_id=self.lease_id, - maxsize_condition=self.maxsize_condition, - timeout=self.timeout, - ) - - self.current_length = resp.append_offset - else: - resp = self.blob_service.append_block( - self.container_name, - self.blob_name, - chunk_data, - validate_content=self.validate_content, - lease_id=self.lease_id, - maxsize_condition=self.maxsize_condition, - appendpos_condition=self.current_length + chunk_offset, - timeout=self.timeout, - ) - - self.set_response_properties(resp) - - -class _SubStream(IOBase): - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE \ - else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - self._current_buffer_start = 0 - self._current_buffer_size = 0 - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, n): - if self.closed: - raise ValueError("Stream is closed.") - - # adjust if out of bounds - if n + self._position >= self._length: - n = self._length - self._position - - # return fast - if n == 0 or self._buffer.closed: - return b'' - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(n) - bytes_read = len(read_buffer) - bytes_remaining = n - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_connections > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read 
will be corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = - offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False diff --git a/azure/multiapi/storage/v2017_07_29/blob/appendblobservice.py b/azure/multiapi/storage/v2017_07_29/blob/appendblobservice.py deleted file mode 100644 index 76a81fe..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/appendblobservice.py +++ /dev/null @@ -1,552 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -import sys -from os import path - -from ..common._common_conversion import ( - _to_str, - _int_to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_data_bytes_only, - _add_metadata_headers, -) -from ._deserialization import ( - _parse_append_block, - _parse_base_properties, -) -from ._serialization import ( - _get_path, -) -from ._upload_chunking import ( - _AppendBlobChunkUploader, - _upload_blob_chunks, -) -from .baseblobservice import BaseBlobService -from .models import ( - _BlobTypes, - ResourceProperties -) - -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - - -class AppendBlobService(BaseBlobService): - ''' - An append blob is comprised of blocks and is optimized for append operations. - When you modify an append blob, blocks are added to the end of the blob only, - via the append_block operation. Updating or deleting of existing blocks is not - supported. Unlike a block blob, an append blob does not expose its block IDs. - - Each block in an append blob can be a different size, up to a maximum of 4 MB, - and an append blob can include up to 50,000 blocks. The maximum size of an - append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks). - - :ivar int MAX_BLOCK_SIZE: - The size of the blocks put by append_blob_from_* methods. Smaller blocks - may be put if there is less data provided. The maximum block size the service - supports is 4MB. - ''' - MAX_BLOCK_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. 
- :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - self.blob_type = _BlobTypes.AppendBlob - super(AppendBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout) - - def create_blob(self, container_name, blob_name, content_settings=None, - metadata=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Creates a blob or overrides an existing blob. Use if_match=* to - prevent overriding an existing blob. - - See create_blob_from_* for high level - functions that handle the creation and upload of large blobs with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to - perform the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: ETag and last modified properties for the updated Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - return self._perform_request(request, _parse_base_properties) - - def append_block(self, container_name, blob_name, block, - validate_content=False, maxsize_condition=None, - appendpos_condition=None, - lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Commits a new block of data to the end of an existing append blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param bytes block: - Content of the block in bytes. - :param bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - ETag, last modified, append offset, and committed block count - properties for the updated Append Blob - :rtype: :class:`~azure.storage.blob.models.AppendBlockProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block', block) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'appendblock', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-condition-maxsize': _to_str(maxsize_condition), - 'x-ms-blob-condition-appendpos': _to_str(appendpos_condition), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - request.body = _get_data_bytes_only('block', block) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_append_block) - - # ----Convenience APIs---------------------------------------------- - - def append_blob_from_path( - self, container_name, blob_name, file_path, validate_content=False, - maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None): - ''' - Appends to the content of an existing blob from a file path, with automatic - chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. 
- :type progress_callback: func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: ETag and last modified properties for the Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - return self.append_blob_from_stream( - container_name, - blob_name, - stream, - count=count, - validate_content=validate_content, - maxsize_condition=maxsize_condition, - progress_callback=progress_callback, - lease_id=lease_id, - timeout=timeout) - - def append_blob_from_bytes( - self, container_name, blob_name, blob, index=0, count=None, - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None): - ''' - Appends to the content of an existing blob from an array of bytes, with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
- :return: ETag and last modified properties for the Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_not_none('index', index) - _validate_type_bytes('blob', blob) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - return self.append_blob_from_stream( - container_name, - blob_name, - stream, - count=count, - validate_content=validate_content, - maxsize_condition=maxsize_condition, - lease_id=lease_id, - progress_callback=progress_callback, - timeout=timeout) - - def append_blob_from_text( - self, container_name, blob_name, text, encoding='utf-8', - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None): - ''' - Appends to the content of an existing blob from str/unicode, with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str text: - Text to upload to the blob. - :param str encoding: - Python encoding to use to convert the text to bytes. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
- :return: ETag and last modified properties for the Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('text', text) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - return self.append_blob_from_bytes( - container_name, - blob_name, - text, - index=0, - count=len(text), - validate_content=validate_content, - maxsize_condition=maxsize_condition, - lease_id=lease_id, - progress_callback=progress_callback, - timeout=timeout) - - def append_blob_from_stream( - self, container_name, blob_name, stream, count=None, - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None): - ''' - Appends to the content of an existing blob from a file/stream, with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param io.IOBase stream: - Opened stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: ETag and last modified properties for the Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - # _upload_blob_chunks returns the block ids for block blobs so resource_properties - # is passed as a parameter to get the last_modified and etag for page and append blobs. 
- # this info is not needed for block_blobs since _put_block_list is called after which gets this info - resource_properties = ResourceProperties() - _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=1, # upload not easily parallelizable - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_AppendBlobChunkUploader, - maxsize_condition=maxsize_condition, - timeout=timeout, - resource_properties=resource_properties - ) - - return resource_properties diff --git a/azure/multiapi/storage/v2017_07_29/blob/baseblobservice.py b/azure/multiapi/storage/v2017_07_29/blob/baseblobservice.py deleted file mode 100644 index 7ffbb39..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/baseblobservice.py +++ /dev/null @@ -1,3238 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys -from abc import ABCMeta - -from azure.common import AzureHttpError - -from ..common._auth import ( - _StorageSASAuthentication, - _StorageSharedKeyAuthentication, - _StorageNoAuthentication, -) -from ..common._common_conversion import ( - _int_to_str, - _to_str, - _datetime_to_utc_string, -) -from ..common._connection import _ServiceParameters -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._deserialization import ( - _convert_xml_to_service_properties, - _parse_metadata, - _parse_properties, - _convert_xml_to_service_stats, - _parse_length_from_content_range, -) -from ..common._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _validate_decryption_required, - _validate_access_policies, - _ERROR_PARALLEL_NOT_SEEKABLE, -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_request_body, - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, - _add_metadata_headers, -) -from ..common.models import ( - Services, - ListGenerator, - _OperationContext, -) -from .sharedaccesssignature import ( - BlobSharedAccessSignature, -) -from ..common.storageclient import StorageClient -from ._deserialization import ( - _convert_xml_to_containers, - _parse_blob, - _convert_xml_to_blob_list, - _parse_container, - _parse_snapshot_blob, - _parse_lease, - _convert_xml_to_signed_identifiers_and_access, - _parse_base_properties, -) -from ._download_chunking import _download_blob_chunks -from ._error import ( - _ERROR_INVALID_LEASE_DURATION, - _ERROR_INVALID_LEASE_BREAK_PERIOD, -) -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from .models import ( - BlobProperties, - _LeaseActions, - ContainerPermissions, - BlobPermissions, -) - -from ._constants import ( - X_MS_VERSION, - __version__ as package_version, -) - -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - - -class BaseBlobService(StorageClient): - ''' - This is the main class managing Blob resources. - - The Blob service stores text and binary data as blobs in the cloud. - The Blob service offers the following three resources: the storage account, - containers, and blobs. 
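A minimal usage sketch of the AppendBlobService API shown above (the import path is assumed from the surrounding package layout; '<account>' and '<key>' are placeholders):

from azure.multiapi.storage.v2017_07_29.blob import AppendBlobService

service = AppendBlobService(account_name='<account>', account_key='<key>')
service.create_blob('logs', 'app.log')                             # start an empty append blob
service.append_block('logs', 'app.log', b'first line\n')           # commit one block (4 MB max)
service.append_blob_from_text('logs', 'app.log', 'second line\n')  # chunked convenience helper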
Within your storage account, containers provide a - way to organize sets of blobs. For more information please see: - https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx - - :ivar int MAX_SINGLE_GET_SIZE: - The size of the first range get performed by get_blob_to_* methods if - max_connections is greater than 1. Less data will be returned if the - blob is smaller than this. - :ivar int MAX_CHUNK_GET_SIZE: - The size of subsequent range gets performed by get_blob_to_* methods if - max_connections is greater than 1 and the blob is larger than MAX_SINGLE_GET_SIZE. - Less data will be returned if the remainder of the blob is smaller than - this. If this is set to larger than 4MB, content_validation will throw an - error if enabled. However, if content_validation is not desired a size - greater than 4MB may be optimal. Setting this below 4MB is not recommended. - :ivar object key_encryption_key: - The key-encryption-key optionally provided by the user. If provided, will be used to - encrypt/decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR the resolver must be provided. - If both are provided, the resolver will take precedence. - Must implement the following methods for APIs requiring encryption: - wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - Must implement the following methods for APIs requiring decryption: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :ivar function key_resolver_function(kid): - A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR - the resolver must be provided. If both are provided, the resolver will take precedence. - It uses the kid string to return a key-encryption-key implementing the interface defined above. - :ivar bool require_encryption: - A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and - successfully read from the queue are/were encrypted while on the server. If this flag is set, all required - parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver. - ''' - - __metaclass__ = ABCMeta - MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024 - MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. 
- :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - service_params = _ServiceParameters.get_service_parameters( - 'blob', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=custom_domain, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(BaseBlobService, self).__init__(service_params) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - self.is_emulated - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - else: - self.authentication = _StorageNoAuthentication() - - self.require_encryption = False - self.key_encryption_key = None - self.key_resolver_function = None - self._X_MS_VERSION = X_MS_VERSION - self._update_user_agent_string(package_version) - - def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None, snapshot=None): - ''' - Creates the url to access a blob. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str protocol: - Protocol to use: 'http' or 'https'. If not specified, uses the - protocol specified when BaseBlobService was initialized. - :param str sas_token: - Shared access signature token created with - generate_shared_access_signature. - :param str snapshot: - An string value that uniquely identifies the snapshot. The value of - this query parameter indicates the snapshot version. - :return: blob access URL. - :rtype: str - ''' - - url = '{}://{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - container_name, - blob_name, - ) - - if snapshot and sas_token: - url = '{}?snapshot={}&{}'.format(url, snapshot, sas_token) - elif snapshot: - url = '{}?snapshot={}'.format(url, snapshot) - elif sas_token: - url = '{}?{}'.format(url, sas_token) - - return url - - def make_container_url(self, container_name, protocol=None, sas_token=None): - ''' - Creates the url to access a container. - - :param str container_name: - Name of container. 
- :param str protocol: - Protocol to use: 'http' or 'https'. If not specified, uses the - protocol specified when BaseBlobService was initialized. - :param str sas_token: - Shared access signature token created with - generate_shared_access_signature. - :return: container access URL. - :rtype: str - ''' - - url = '{}://{}/{}?restype=container'.format( - protocol or self.protocol, - self.primary_endpoint, - container_name, - ) - - if sas_token: - url = '{}&{}'.format(url, sas_token) - - return url - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the blob service. - Use the returned signature with the sas_token parameter of any BlobService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = BlobSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.BLOB, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_container_shared_access_signature(self, container_name, - permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerPermissions permission: - The permissions associated with the shared access signature. 
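A sketch of how the account-level SAS and URL helpers above compose, reusing the `service` client from the earlier sketch (the ResourceTypes and AccountPermissions import path is an assumption based on the version's common package):

from datetime import datetime, timedelta
from azure.multiapi.storage.v2017_07_29.common.models import ResourceTypes, AccountPermissions  # assumed path

# account SAS limited to read/list on containers and blobs, valid for one hour
sas = service.generate_account_shared_access_signature(
    ResourceTypes(container=True, object=True),
    AccountPermissions(read=True, list=True),
    expiry=datetime.utcnow() + timedelta(hours=1))
print(service.make_container_url('logs', sas_token=sas))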
The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = BlobSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_container( - container_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def generate_blob_shared_access_signature( - self, container_name, blob_name, permission=None, - expiry=None, start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the blob. 
- Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param BlobPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_container_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = BlobSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_blob( - container_name, - blob_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def list_containers(self, prefix=None, num_results=None, include_metadata=False, - marker=None, timeout=None): - ''' - Returns a generator to list the containers under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - containers, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only containers whose names - begin with the specified prefix. - :param int num_results: - Specifies the maximum number of containers to return. A single list - request may return up to 1000 contianers and potentially a continuation - token which should be followed to get additional resutls. - :param bool include_metadata: - Specifies that container metadata be returned in the response. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - include = 'metadata' if include_metadata else None - operation_context = _OperationContext(location_lock=True) - kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, - 'include': include, 'timeout': timeout, '_context': operation_context} - resp = self._list_containers(**kwargs) - - return ListGenerator(resp, self._list_containers, (), kwargs) - - def _list_containers(self, prefix=None, marker=None, max_results=None, - include=None, timeout=None, _context=None): - ''' - Returns a list of the containers under the specified account. - - :param str prefix: - Filters the results to return only containers whose names - begin with the specified prefix. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of containers to return. A single list - request may return up to 1000 contianers and potentially a continuation - token which should be followed to get additional resutls. - :param str include: - Include this parameter to specify that the container's - metadata be returned as part of the response body. 
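The paging behaviour described for list_containers looks like this in practice (again reusing the `service` client from the first sketch):

# lazily follows continuation tokens until every container has been returned
for container in service.list_containers(prefix='logs'):
    print(container.name)

# bounded page; next_marker is populated once the generator is exhausted
page = service.list_containers(num_results=5, include_metadata=True)
names = [c.name for c in page]
more = service.list_containers(num_results=5, marker=page.next_marker)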
set this - parameter to string 'metadata' to get container's metadata. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_containers, operation_context=_context) - - def create_container(self, container_name, metadata=None, - public_access=None, fail_on_exist=False, timeout=None): - ''' - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails if - fail_on_exist is True. - - :param str container_name: - Name of container to create. - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict(str, str) - :param ~azure.storage.blob.models.PublicAccess public_access: - Possible values include: container, blob. - :param bool fail_on_exist: - Specify whether to throw an exception when the container exists. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if container is created, False if container already exists. - :rtype: bool - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-public-access': _to_str(public_access) - } - _add_metadata_headers(metadata, request) - - if not fail_on_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_container_properties(self, container_name, lease_id=None, timeout=None): - ''' - Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :param str container_name: - Name of existing container. - :param str lease_id: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: properties for the specified container within a container object. - :rtype: :class:`~azure.storage.blob.models.Container` - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _parse_container, [container_name]) - - def get_container_metadata(self, container_name, lease_id=None, timeout=None): - ''' - Returns all user-defined metadata for the specified container. - - :param str container_name: - Name of existing container. - :param str lease_id: - If specified, get_container_metadata only succeeds if the - container's lease is active and matches this ID. 
- :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the container metadata name, value pairs. - :rtype: dict(str, str) - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _parse_metadata) - - def set_container_metadata(self, container_name, metadata=None, - lease_id=None, if_modified_since=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param str container_name: - Name of existing container. - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict(str, str) - :param str lease_id: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Container - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'x-ms-lease-id': _to_str(lease_id), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_base_properties) - - def get_container_acl(self, container_name, lease_id=None, timeout=None): - ''' - Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :param str container_name: - Name of existing container. - :param lease_id: - If specified, get_container_acl only succeeds if the - container's lease is active and matches this ID. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A dictionary of access policies associated with the container. 
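For example, creating a container and round-tripping its metadata with the methods above (same placeholder `service` client as in the first sketch):

if service.create_container('logs', metadata={'category': 'test'}):
    print('created')
service.set_container_metadata('logs', metadata={'category': 'prod'})  # replaces all existing metadata
print(service.get_container_metadata('logs'))                          # -> {'category': 'prod'}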
dict of str to - :class:`azure.storage.common.models.AccessPolicy` and a public_access property - if public access is turned on - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _convert_xml_to_signed_identifiers_and_access) - - def set_container_acl(self, container_name, signed_identifiers=None, - public_access=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param str container_name: - Name of existing container. - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) - :param ~azure.storage.blob.models.PublicAccess public_access: - Possible values include: container, blob. - :param str lease_id: - If specified, set_container_acl only succeeds if the - container's lease is active and matches this ID. - :param datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :param datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Container - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-public-access': _to_str(public_access), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'x-ms-lease-id': _to_str(lease_id), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - - return self._perform_request(request, _parse_base_properties) - - def delete_container(self, container_name, fail_not_exist=False, - lease_id=None, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Marks the specified container for deletion. 
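A possible stored-access-policy flow for the two ACL methods above; the AccessPolicy and ContainerPermissions import locations are assumptions based on the docstring references, and `service` is the client from the first sketch:

from datetime import datetime, timedelta
from azure.multiapi.storage.v2017_07_29.common.models import AccessPolicy        # assumed path
from azure.multiapi.storage.v2017_07_29.blob.models import ContainerPermissions  # assumed path

# one named policy (at most five per container), read-only for a day
policy = AccessPolicy(permission=ContainerPermissions.READ,
                      expiry=datetime.utcnow() + timedelta(days=1))
service.set_container_acl('logs', signed_identifiers={'read-only': policy})
print(service.get_container_acl('logs'))  # dict of policy id -> AccessPolicy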
The container and any blobs - contained within it are later deleted during garbage collection. - - :param str container_name: - Name of container to delete. - :param bool fail_not_exist: - Specify whether to throw an exception when the container doesn't - exist. - :param str lease_id: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if container is deleted, False container doesn't exist. - :rtype: bool - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - } - - if not fail_not_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def _lease_container_impl( - self, container_name, lease_action, lease_id, lease_duration, - lease_break_period, proposed_lease_id, if_modified_since, - if_unmodified_since, timeout): - ''' - Establishes and manages a lease on a container. - The Lease Container operation can be called in one of five modes - Acquire, to request a new lease - Renew, to renew an existing lease - Change, to change the ID of an existing lease - Release, to free the lease if it is no longer needed so that another - client may immediately acquire a lease against the container - Break, to end the lease but ensure that another client cannot acquire - a new lease until the current lease period has expired - - :param str container_name: - Name of existing container. - :param str lease_action: - Possible _LeaseActions values: acquire|renew|release|break|change - :param str lease_id: - Required if the container has an active lease. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. For backwards compatibility, the default is - 60, and the value is only used on an acquire operation. - :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. 
This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param str proposed_lease_id: - Optional for Acquire, required for Change. Proposed lease ID, in a - GUID string format. The Blob service returns 400 (Invalid request) - if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - Response headers returned from the service call. - :rtype: dict(str, str) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('lease_action', lease_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'lease', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-lease-action': _to_str(lease_action), - 'x-ms-lease-duration': _to_str(lease_duration), - 'x-ms-lease-break-period': _to_str(lease_break_period), - 'x-ms-proposed-lease-id': _to_str(proposed_lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - } - - return self._perform_request(request, _parse_lease) - - def acquire_container_lease( - self, container_name, lease_duration=-1, proposed_lease_id=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param str container_name: - Name of existing container. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the newly created lease. - :return: str - ''' - _validate_not_none('lease_duration', lease_duration) - if lease_duration != -1 and \ - (lease_duration < 15 or lease_duration > 60): - raise ValueError(_ERROR_INVALID_LEASE_DURATION) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Acquire, - None, # lease_id - lease_duration, - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - timeout) - return lease['id'] - - def renew_container_lease( - self, container_name, lease_id, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Renews the lease. The lease can be renewed if the lease ID specified - matches that associated with the container. Note that - the lease may be renewed even if it has expired as long as the container - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the renewed lease. - :return: str - ''' - _validate_not_none('lease_id', lease_id) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Renew, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - return lease['id'] - - def release_container_lease( - self, container_name, lease_id, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Release the lease. The lease may be released if the lease_id specified matches - that associated with the container. Releasing the lease allows another client - to immediately acquire the lease for the container as soon as the release is complete. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
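# ---------------------------------------------------------------------------
# Editorial sketch (not part of the removed module): the container-lease
# lifecycle described above, exercised through the legacy BlockBlobService
# client. The import path, account name, key and container name below are
# placeholders / assumptions, not values taken from this diff.
# ---------------------------------------------------------------------------
from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # path is an assumption

service = BlockBlobService(account_name='myaccount', account_key='<key>')
lease_id = service.acquire_container_lease('mycontainer', lease_duration=30)
try:
    # work that requires exclusive access to the container goes here
    lease_id = service.renew_container_lease('mycontainer', lease_id)  # resets the duration clock
finally:
    service.release_container_lease('mycontainer', lease_id)  # another client may acquire immediately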
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('lease_id', lease_id) - - self._lease_container_impl(container_name, - _LeaseActions.Release, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - - def break_container_lease( - self, container_name, lease_break_period=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Break the lease, if the container has an active lease. Once a lease is - broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param str container_name: - Name of existing container. - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
- :return: int - ''' - if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60): - raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Break, - None, # lease_id - None, # lease_duration - lease_break_period, - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - return lease['time'] - - def change_container_lease( - self, container_name, lease_id, proposed_lease_id, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Change the lease ID of an active lease. A change must include the current - lease ID and a new lease ID. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('lease_id', lease_id) - - self._lease_container_impl(container_name, - _LeaseActions.Change, - lease_id, - None, # lease_duration - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - timeout) - - def list_blobs(self, container_name, prefix=None, num_results=None, include=None, - delimiter=None, marker=None, timeout=None): - ''' - Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service and stop when all blobs have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - blobs, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str container_name: - Name of existing container. - :param str prefix: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param int num_results: - Specifies the maximum number of blobs to return, - including all :class:`BlobPrefix` elements. If the request does not specify - num_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting num_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param ~azure.storage.blob.models.Include include: - Specifies one or more additional datasets to include in the response. 
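# ---------------------------------------------------------------------------
# Editorial sketch (not part of the removed module): rotating and then breaking
# a container lease with the change/break operations documented above. The
# import path, names and IDs are placeholders.
# ---------------------------------------------------------------------------
import uuid
from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # path is an assumption

service = BlockBlobService(account_name='myaccount', account_key='<key>')
lease_id = service.acquire_container_lease('mycontainer')  # infinite lease by default
new_id = str(uuid.uuid4())
service.change_container_lease('mycontainer', lease_id, proposed_lease_id=new_id)

# break_container_lease returns the approximate number of seconds until a new
# lease can be acquired; no matching lease ID is required to break.
seconds_remaining = service.break_container_lease('mycontainer', lease_break_period=10)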
- :param str delimiter: - When the request includes this parameter, the operation - returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the - result list that acts as a placeholder for all blobs whose names begin - with the same substring up to the appearance of the delimiter character. - The delimiter may be a single character or a string. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - operation_context = _OperationContext(location_lock=True) - args = (container_name,) - kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, - 'include': include, 'delimiter': delimiter, 'timeout': timeout, - '_context': operation_context} - resp = self._list_blobs(*args, **kwargs) - - return ListGenerator(resp, self._list_blobs, args, kwargs) - - def _list_blobs(self, container_name, prefix=None, marker=None, - max_results=None, include=None, delimiter=None, timeout=None, - _context=None): - ''' - Returns the list of blobs under the specified container. - - :param str container_name: - Name of existing container. - :param str prefix: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of blobs to return, - including all :class:`~azure.storage.blob.models.BlobPrefix` elements. If the request does not specify - max_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting max_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param str include: - Specifies one or more datasets to include in the - response. To specify more than one of these options on the URI, - you must separate each option with a comma. Valid values are: - snapshots: - Specifies that snapshots should be included in the - enumeration. Snapshots are listed from oldest to newest in - the response. - metadata: - Specifies that blob metadata be returned in the response. - uncommittedblobs: - Specifies that blobs for which blocks have been uploaded, - but which have not been committed using Put Block List - (REST API), be included in the response. - copy: - Version 2012-02-12 and newer. Specifies that metadata - related to any current or previous Copy Blob operation - should be included in the response. - deleted: - Version 2017-07-29 and newer. Specifies that soft deleted blobs - which are retained by the service should be included - in the response. - :param str delimiter: - When the request includes this parameter, the operation - returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the response body that acts as a - placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character.
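# ---------------------------------------------------------------------------
# Editorial sketch (not part of the removed module): lazy enumeration with the
# list_blobs generator defined above. Container name and prefix are placeholders.
# ---------------------------------------------------------------------------
from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # path is an assumption

service = BlockBlobService(account_name='myaccount', account_key='<key>')

# The generator transparently follows continuation tokens until num_results is reached.
for blob in service.list_blobs('mycontainer', prefix='logs/', num_results=100):
    print(blob.name)

# With a delimiter, BlobPrefix placeholders stand in for "virtual directories".
for item in service.list_blobs('mycontainer', delimiter='/'):
    print(item.name)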
The - delimiter may be a single character or a string. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'list', - 'prefix': _to_str(prefix), - 'delimiter': _to_str(delimiter), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_blob_list, operation_context=_context) - - def get_blob_service_stats(self, timeout=None): - ''' - Retrieves statistics related to replication for the Blob service. It is - only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: :class:`~azure.storage.common.models.ServiceStats` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(primary=False, secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_stats) - - def set_blob_service_properties( - self, logging=None, hour_metrics=None, minute_metrics=None, - cors=None, target_version=None, timeout=None, delete_retention_policy=None): - ''' - Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. If an element (ex Logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param logging: - Groups the Azure Analytics Logging settings. - :type logging: - :class:`~azure.storage.common.models.Logging` - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: - :class:`~azure.storage.common.models.Metrics` - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: - :class:`~azure.storage.common.models.Metrics` - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. 
- :type cors: list(:class:`~azure.storage.common.models.CorsRule`) - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param int timeout: - The timeout parameter is expressed in seconds. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: - :class:`~azure.storage.common.models.DeleteRetentionPolicy` - ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, - cors, target_version, delete_retention_policy)) - - self._perform_request(request) - - def get_blob_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The blob :class:`~azure.storage.common.models.ServiceProperties` with an attached - target_version property. - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def get_blob_properties( - self, container_name, blob_name, snapshot=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - Returns :class:`~azure.storage.blob.models.Blob` - with :class:`~azure.storage.blob.models.BlobProperties` and a metadata dict. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). 
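# ---------------------------------------------------------------------------
# Editorial sketch (not part of the removed module): reading geo-replication
# stats and enabling a soft-delete retention policy via the service-level calls
# above. Both import paths are assumptions about how this package is laid out.
# ---------------------------------------------------------------------------
from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService                 # assumption
from azure.multiapi.storage.v2018_11_09.common.models import DeleteRetentionPolicy   # assumption

service = BlockBlobService(account_name='myaccount', account_key='<key>')

stats = service.get_blob_service_stats()  # requires read-access geo-redundant replication
print(stats.geo_replication.status, stats.geo_replication.last_sync_time)

props = service.get_blob_service_properties()  # settings left as None are preserved on set
service.set_blob_service_properties(
    delete_retention_policy=DeleteRetentionPolicy(enabled=True, days=7))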
- Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: a blob object including properties and metadata. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'HEAD' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_blob, [blob_name, snapshot]) - - def set_blob_properties( - self, container_name, blob_name, content_settings=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Sets system properties on the blob. If one property is set for the - content_settings, all properties will be overridden. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds.
- :return: ETag and last modified properties for the updated Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id) - } - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - return self._perform_request(request, _parse_base_properties) - - def exists(self, container_name, blob_name=None, snapshot=None, timeout=None): - ''' - Returns a boolean indicating whether the container exists (if blob_name - is None), or otherwise a boolean indicating whether the blob exists. - - :param str container_name: - Name of a container. - :param str blob_name: - Name of a blob. If None, the container will be checked for existence. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the snapshot. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A boolean indicating whether the resource exists. - :rtype: bool - ''' - _validate_not_none('container_name', container_name) - try: - if blob_name is None: - self.get_container_properties(container_name, timeout=timeout) - else: - self.get_blob_properties(container_name, blob_name, snapshot=snapshot, timeout=timeout) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def _get_blob( - self, container_name, blob_name, snapshot=None, start_range=None, - end_range=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, - _context=None): - ''' - Downloads a blob's content, metadata, and properties. You can also - call this API to read a snapshot. You can specify a range if you don't - need to download the blob in its entirety. If no range is specified, - the full blob will be downloaded. - - See get_blob_to_* for high level functions that handle the download - of large blobs with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. 
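# ---------------------------------------------------------------------------
# Editorial sketch (not part of the removed module): an existence check followed
# by a conditional properties update, using the ContentSettings model referenced
# above. Import paths, container and blob names are placeholders.
# ---------------------------------------------------------------------------
from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService            # assumption
from azure.multiapi.storage.v2018_11_09.blob.models import ContentSettings      # assumption

service = BlockBlobService(account_name='myaccount', account_key='<key>')

if service.exists('mycontainer', 'report.csv'):
    blob = service.get_blob_properties('mycontainer', 'report.csv')
    service.set_blob_properties(
        'mycontainer', 'report.csv',
        content_settings=ContentSettings(content_type='text/csv'),
        if_match=blob.properties.etag)  # fail if the blob changed since the read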
- :param bool validate_content: - When this is set to True and specified together with the Range header, - the service returns the MD5 hash for the range, as long as the range - is less than or equal to 4 MB in size. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A Blob with content, properties, and metadata. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_decryption_required(self.require_encryption, - self.key_encryption_key, - self.key_resolver_function) - - start_offset, end_offset = 0, 0 - if self.key_encryption_key is not None or self.key_resolver_function is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. 
- if start_range > 0: - start_offset += 16 - start_range -= 16 - - if end_range is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - check_content_md5=validate_content) - - return self._perform_request(request, _parse_blob, - [blob_name, snapshot, validate_content, self.require_encryption, - self.key_encryption_key, self.key_resolver_function, - start_offset, end_offset], - operation_context=_context) - - def get_blob_to_path( - self, container_name, blob_name, file_path, open_mode='wb', - snapshot=None, start_range=None, end_range=None, - validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Downloads a blob to a file path, with automatic chunking and progress - notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with - properties and metadata. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str file_path: - Path of file to write out to. - :param str open_mode: - Mode to use when opening the file. Note that specifying append only - open_mode prevents parallel download. So, max_connections must be set - to 1 if this open_mode is used. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. 
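# ---------------------------------------------------------------------------
# Editorial sketch (not part of the removed module): chunked download to a local
# file with a progress callback, as described by get_blob_to_path above. The
# import path, names and local path are placeholders.
# ---------------------------------------------------------------------------
from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # path is an assumption

service = BlockBlobService(account_name='myaccount', account_key='<key>')

def report_progress(current, total):
    # total is the blob size once it is known
    print('{0}/{1} bytes downloaded'.format(current, total))

blob = service.get_blob_to_path(
    'mycontainer', 'big.dat', '/tmp/big.dat',
    max_connections=4,                # parallel chunks of MAX_CHUNK_GET_SIZE each
    validate_content=True,            # per-chunk transactional MD5 (chunks <= 4 MB)
    progress_callback=report_progress)
print(blob.properties.content_length)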
- :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. 
- :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - _validate_not_none('open_mode', open_mode) - - if max_connections > 1 and 'a' in open_mode: - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - with open(file_path, open_mode) as stream: - blob = self.get_blob_to_stream( - container_name, - blob_name, - stream, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - return blob - - def get_blob_to_stream( - self, container_name, blob_name, stream, snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - - ''' - Downloads a blob to a stream, with automatic chunking and progress - notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with - properties and metadata. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param io.IOBase stream: - Opened stream to write to. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. 
This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - - if end_range is not None: - _validate_not_none("start_range", start_range) - - # If the user explicitly sets max_connections to 1, do a single shot download - if max_connections == 1: - blob = self._get_blob(container_name, - blob_name, - snapshot, - start_range=start_range, - end_range=end_range, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - # Set the download size - download_size = blob.properties.content_length - - # If max_connections is greater than 1, do the first get to establish the - # size of the blob and get the first segment of data - else: - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. 
- first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE - - initial_request_start = start_range if start_range is not None else 0 - - if end_range is not None and end_range - start_range < first_get_size: - initial_request_end = end_range - else: - initial_request_end = initial_request_start + first_get_size - 1 - - # Send a context object to make sure we always retry to the initial location - operation_context = _OperationContext(location_lock=True) - try: - blob = self._get_blob(container_name, - blob_name, - snapshot, - start_range=initial_request_start, - end_range=initial_request_end, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - _context=operation_context) - - # Parse the total blob size and adjust the download size if ranges - # were specified - blob_size = _parse_length_from_content_range(blob.properties.content_range) - if end_range is not None: - # Use the end_range unless it is over the end of the blob - download_size = min(blob_size, end_range - start_range + 1) - elif start_range is not None: - download_size = blob_size - start_range - else: - download_size = blob_size - except AzureHttpError as ex: - if start_range is None and ex.status_code == 416: - # Get range will fail on an empty blob. If the user did not - # request a range, do a regular get request in order to get - # any properties. - blob = self._get_blob(container_name, - blob_name, - snapshot, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - _context=operation_context) - - # Set the download size to empty - download_size = 0 - else: - raise ex - - # Mark the first progress chunk. If the blob is small or this is a single - # shot download, this is the only call - if progress_callback: - progress_callback(blob.properties.content_length, download_size) - - # Write the content to the user stream - # Clear blob content since output has been written to user stream - if blob.content is not None: - stream.write(blob.content) - blob.content = None - - # If the blob is small or single shot download was used, the download is - # complete at this point. If blob size is large, use parallel download. - if blob.properties.content_length != download_size: - # Lock on the etag. 
This can be overriden by the user by specifying '*' - if_match = if_match if if_match is not None else blob.properties.etag - - end_blob = blob_size - if end_range is not None: - # Use the end_range unless it is over the end of the blob - end_blob = min(blob_size, end_range + 1) - - _download_blob_chunks( - self, - container_name, - blob_name, - snapshot, - download_size, - self.MAX_CHUNK_GET_SIZE, - first_get_size, - initial_request_end + 1, # start where the first download ended - end_blob, - stream, - max_connections, - progress_callback, - validate_content, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout, - operation_context - ) - - # Set the content length to the download size instead of the size of - # the last range - blob.properties.content_length = download_size - - # Overwrite the content range to the user requested range - blob.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, blob_size) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - blob.properties.content_md5 = None - - return blob - - def get_blob_to_bytes( - self, container_name, blob_name, snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Downloads a blob as an array of bytes, with automatic chunking and - progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with - properties, metadata, and content. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. 
- :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - - stream = BytesIO() - blob = self.get_blob_to_stream( - container_name, - blob_name, - stream, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - blob.content = stream.getvalue() - return blob - - def get_blob_to_text( - self, container_name, blob_name, encoding='utf-8', snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Downloads a blob as unicode text, with automatic chunking and progress - notifications. 
Returns an instance of :class:`~azure.storage.blob.models.Blob` with - properties, metadata, and content. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str encoding: - Python encoding to use when decoding the blob data. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
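# ---------------------------------------------------------------------------
# Editorial sketch (not part of the removed module): downloading a text blob and
# a byte range with get_blob_to_text / get_blob_to_bytes as described above.
# The import path and names are placeholders.
# ---------------------------------------------------------------------------
from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # path is an assumption

service = BlockBlobService(account_name='myaccount', account_key='<key>')

text_blob = service.get_blob_to_text('mycontainer', 'notes.txt', encoding='utf-8')
print(text_blob.content)  # decoded unicode string

# start_range/end_range are inclusive: this fetches only the first 512 bytes.
partial = service.get_blob_to_bytes('mycontainer', 'big.dat', start_range=0, end_range=511)
print(len(partial.content))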
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('encoding', encoding) - - blob = self.get_blob_to_bytes(container_name, - blob_name, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - blob.content = blob.content.decode(encoding) - return blob - - def get_blob_metadata( - self, container_name, blob_name, snapshot=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Returns all user-defined metadata for the specified blob or snapshot. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque value that, - when present, specifies the blob snapshot to retrieve. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the blob metadata name, value pairs. 
- :rtype: dict(str, str) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_metadata) - - def set_blob_metadata(self, container_name, blob_name, - metadata=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Sets user-defined metadata for the specified blob as one or more - name-value pairs. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. 
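For reference, the two metadata operations above are symmetric: set_blob_metadata replaces the blob's entire metadata set on every call, and get_blob_metadata reads it back as a plain dict. A minimal usage sketch against the legacy client removed by this change; the import path, credentials, and container/blob names below are illustrative placeholders, not values from this patch::

    from azure.multiapi.storage.v2017_07_29.blob import BlockBlobService

    service = BlockBlobService(account_name='myaccount', account_key='<key>')

    # Each call replaces all existing user-defined metadata on the blob.
    service.set_blob_metadata('mycontainer', 'myblob',
                              metadata={'category': 'reports', 'owner': 'ops'})

    # Metadata is returned as a plain dict of name/value pairs.
    meta = service.get_blob_metadata('mycontainer', 'myblob')
    print(meta.get('category'))

    # Calling without a metadata argument clears all metadata from the blob.
    service.set_blob_metadata('mycontainer', 'myblob')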
- :return: ETag and last modified properties for the updated Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_base_properties) - - def _lease_blob_impl(self, container_name, blob_name, - lease_action, lease_id, - lease_duration, lease_break_period, - proposed_lease_id, if_modified_since, - if_unmodified_since, if_match, if_none_match, timeout=None): - ''' - Establishes and manages a lease on a blob for write and delete operations. - The Lease Blob operation can be called in one of five modes: - Acquire, to request a new lease. - Renew, to renew an existing lease. - Change, to change the ID of an existing lease. - Release, to free the lease if it is no longer needed so that another - client may immediately acquire a lease against the blob. - Break, to end the lease but ensure that another client cannot acquire - a new lease until the current lease period has expired. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_action: - Possible _LeaseActions acquire|renew|release|break|change - :param str lease_id: - Required if the blob has an active lease. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param str proposed_lease_id: - Optional for acquire, required for change. Proposed lease ID, in a - GUID string format. The Blob service returns 400 (Invalid request) - if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - Response headers returned from the service call. - :rtype: dict(str, str) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('lease_action', lease_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'lease', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-lease-action': _to_str(lease_action), - 'x-ms-lease-duration': _to_str(lease_duration), - 'x-ms-lease-break-period': _to_str(lease_break_period), - 'x-ms-proposed-lease-id': _to_str(proposed_lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_lease) - - def acquire_blob_lease(self, container_name, blob_name, - lease_duration=-1, - proposed_lease_id=None, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Requests a new lease. If the blob does not have an active lease, the Blob - service creates a lease on the blob and returns a new lease ID. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the newly created lease. - :return: str - ''' - _validate_not_none('lease_duration', lease_duration) - - if lease_duration != -1 and \ - (lease_duration < 15 or lease_duration > 60): - raise ValueError(_ERROR_INVALID_LEASE_DURATION) - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Acquire, - None, # lease_id - lease_duration, - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['id'] - - def renew_blob_lease(self, container_name, blob_name, - lease_id, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Renews the lease. The lease can be renewed if the lease ID specified on - the request matches that associated with the blob. Note that the lease may - be renewed even if it has expired as long as the blob has not been modified - or leased again since the expiration of that lease. When you renew a lease, - the lease duration clock resets. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the renewed lease. 
- :return: str - ''' - _validate_not_none('lease_id', lease_id) - - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Renew, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['id'] - - def release_blob_lease(self, container_name, blob_name, - lease_id, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Releases the lease. The lease may be released if the lease ID specified on the - request matches that associated with the blob. Releasing the lease allows another - client to immediately acquire the lease for the blob as soon as the release is complete. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('lease_id', lease_id) - - self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Release, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - def break_blob_lease(self, container_name, blob_name, - lease_break_period=None, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Breaks the lease, if the blob has an active lease. Once a lease is broken, - it cannot be renewed. Any authorized request can break the lease; the request - is not required to specify a matching lease ID. When a lease is broken, - the lease break period is allowed to elapse, during which time no lease operation - except break and release can be performed on the blob. When a lease is successfully - broken, the response indicates the interval in seconds until a new lease can be acquired. - - A lease that has been broken can also be released, in which case another client may - immediately acquire the lease on the blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. 
- :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :return: int - ''' - if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60): - raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD) - - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Break, - None, # lease_id - None, # lease_duration - lease_break_period, - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['time'] - - def change_blob_lease(self, container_name, blob_name, - lease_id, - proposed_lease_id, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Changes the lease ID of an active lease. A change must include the current - lease ID and a new lease ID. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Required if the blob has an active lease. - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Change, - lease_id, - None, # lease_duration - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - def snapshot_blob(self, container_name, blob_name, - metadata=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, lease_id=None, timeout=None): - ''' - Creates a read-only snapshot of a blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param metadata: - Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the - base blob metadata to the snapshot. If one or more name-value pairs - are specified, the snapshot is created with the specified metadata, - and metadata is not copied from the base blob. - :type metadata: dict(str, str) - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. 
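The lease helpers above all funnel into _lease_blob_impl with a different lease action. The typical lifecycle looks roughly like the sketch below, reusing the illustrative `service` client from the earlier example; durations follow the documented 15-60 second range, or -1 for an infinite lease::

    # Acquire a 30-second lease; the returned ID is required for later writes.
    lease_id = service.acquire_blob_lease('mycontainer', 'myblob', lease_duration=30)

    # Operations against a leased blob must present the lease ID.
    service.set_blob_metadata('mycontainer', 'myblob',
                              metadata={'state': 'locked'}, lease_id=lease_id)

    # Renewing resets the duration clock; releasing frees the blob immediately.
    service.renew_blob_lease('mycontainer', 'myblob', lease_id)
    service.release_blob_lease('mycontainer', 'myblob', lease_id)

    # Alternatively, break the lease without knowing its ID; the return value is
    # the number of seconds before a new lease can be acquired.
    # seconds_left = service.break_blob_lease('mycontainer', 'myblob', lease_break_period=10)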
- :return: snapshot properties - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'snapshot', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id) - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_snapshot_blob, [blob_name]) - - def copy_blob(self, container_name, blob_name, copy_source, - metadata=None, - source_if_modified_since=None, - source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=None, - destination_if_unmodified_since=None, - destination_if_match=None, - destination_if_none_match=None, - destination_lease_id=None, - source_lease_id=None, timeout=None): - ''' - Copies a blob asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call get_blob_properties on the destination - blob to check the status of the copy operation. The final blob will be - committed when the copy completes. - - :param str container_name: - Name of the destination container. The container must exist. - :param str blob_name: - Name of the destination blob. If the destination blob exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :param datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :param ETag source_if_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the source blob only if its ETag matches the value - specified. If the ETag values do not match, the Blob service returns - status code 412 (Precondition Failed). This header cannot be specified - if the source is an Azure File. - :param ETag source_if_none_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the blob only if its ETag does not match the value - specified. If the values are identical, the Blob service returns status - code 412 (Precondition Failed). This header cannot be specified if the - source is an Azure File. - :param datetime destination_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :param datetime destination_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :param ETag destination_if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - matches the ETag value for an existing destination blob. If the ETag for - the destination blob does not match the ETag specified for If-Match, the - Blob service returns status code 412 (Precondition Failed). 
- :param ETag destination_if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - does not match the ETag value for the destination blob. Specify the wildcard - character (*) to perform the operation only if the destination blob does not - exist. If the specified condition isn't met, the Blob service returns status - code 412 (Precondition Failed). - :param str destination_lease_id: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :param str source_lease_id: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.blob.models.CopyProperties` - ''' - return self._copy_blob(container_name, blob_name, copy_source, - metadata, - None, - source_if_modified_since, source_if_unmodified_since, - source_if_match, source_if_none_match, - destination_if_modified_since, - destination_if_unmodified_since, - destination_if_match, - destination_if_none_match, - destination_lease_id, - source_lease_id, timeout, - False) - - def _copy_blob(self, container_name, blob_name, copy_source, - metadata=None, - premium_page_blob_tier=None, - source_if_modified_since=None, - source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=None, - destination_if_unmodified_since=None, - destination_if_match=None, - destination_if_none_match=None, - destination_lease_id=None, - source_lease_id=None, timeout=None, - incremental_copy=False): - ''' - See copy_blob for more details. This helper method - allows for standard copies as well as incremental copies which are only supported for page blobs. - :param bool incremental_copy: - Whether to perform an incremental copy (supported only for page blobs) instead of a standard copy.
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('copy_source', copy_source) - - if copy_source.startswith('/'): - # Backwards compatibility for earlier versions of the SDK where - # the copy source can be in the following formats: - # - Blob in named container: - # /accountName/containerName/blobName - # - Snapshot in named container: - # /accountName/containerName/blobName?snapshot= - # - Blob in root container: - # /accountName/blobName - # - Snapshot in root container: - # /accountName/blobName?snapshot= - account, _, source = \ - copy_source.partition('/')[2].partition('/') - copy_source = self.protocol + '://' + \ - self.primary_endpoint + '/' + source - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - - if incremental_copy: - request.query = { - 'comp': 'incrementalcopy', - 'timeout': _int_to_str(timeout), - } - else: - request.query = {'timeout': _int_to_str(timeout)} - - request.headers = { - 'x-ms-copy-source': _to_str(copy_source), - 'x-ms-source-if-modified-since': _to_str(source_if_modified_since), - 'x-ms-source-if-unmodified-since': _to_str(source_if_unmodified_since), - 'x-ms-source-if-match': _to_str(source_if_match), - 'x-ms-source-if-none-match': _to_str(source_if_none_match), - 'If-Modified-Since': _datetime_to_utc_string(destination_if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(destination_if_unmodified_since), - 'If-Match': _to_str(destination_if_match), - 'If-None-Match': _to_str(destination_if_none_match), - 'x-ms-lease-id': _to_str(destination_lease_id), - 'x-ms-source-lease-id': _to_str(source_lease_id), - 'x-ms-access-tier': _to_str(premium_page_blob_tier) - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_properties, [BlobProperties]).copy - - def abort_copy_blob(self, container_name, blob_name, copy_id, - lease_id=None, timeout=None): - ''' - Aborts a pending copy_blob operation, and leaves a destination blob - with zero length and full metadata. - - :param str container_name: - Name of destination container. - :param str blob_name: - Name of destination blob. - :param str copy_id: - Copy identifier provided in the copy.id of the original - copy_blob operation. - :param str lease_id: - Required if the destination blob has an active infinite lease. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('copy_id', copy_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'copy', - 'copyid': _to_str(copy_id), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-copy-action': 'abort', - } - - self._perform_request(request) - - def delete_blob(self, container_name, blob_name, snapshot=None, - lease_id=None, delete_snapshots=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Marks the specified blob or snapshot for deletion. - The blob is later deleted during garbage collection. - - Note that in order to delete a blob, you must delete all of its - snapshots. 
You can delete both at the same time with the Delete - Blob operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through List Blobs API specifying include=Include.Deleted option. - Soft-deleted blob or snapshot can be restored using Undelete API. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to delete. - :param str lease_id: - Required if the blob has an active lease. - :param ~azure.storage.blob.models.DeleteSnapshot delete_snapshots: - Required if the blob has associated snapshots. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-delete-snapshots': _to_str(delete_snapshots), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout) - } - - self._perform_request(request) - - def undelete_blob(self, container_name, blob_name, timeout=None): - ''' - The undelete Blob operation restores the contents and metadata of soft deleted blob or snapshot. - Attempting to undelete a blob or snapshot that is not soft deleted will succeed without any changes. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int timeout: - The timeout parameter is expressed in seconds. 
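Copy, abort, delete, and undelete compose as the docstrings above describe: copy_blob returns a CopyProperties object whose id can later be passed to abort_copy_blob, and a soft-deleted blob can be restored with undelete_blob when the account has a delete retention policy. The polling below via get_blob_properties and its properties.copy.status field is a hedged sketch of that pattern (the attribute chain is an assumption about the legacy Blob model, and all names are illustrative)::

    import time

    # Start a server-side, best-effort copy.
    source_url = 'https://myaccount.blob.core.windows.net/mycontainer/myblob'
    copy = service.copy_blob('mycontainer', 'myblob-copy', source_url)

    # Poll the destination until the copy leaves the 'pending' state.
    props = service.get_blob_properties('mycontainer', 'myblob-copy')
    while props.properties.copy.status == 'pending':
        time.sleep(1)
        props = service.get_blob_properties('mycontainer', 'myblob-copy')

    # A still-pending copy could instead be cancelled with the copy ID, leaving a
    # zero-length destination blob with full metadata:
    # service.abort_copy_blob('mycontainer', 'myblob-copy', copy.id)

    # Soft delete and restore (requires a delete retention policy on the account).
    service.delete_blob('mycontainer', 'myblob-copy')
    service.undelete_blob('mycontainer', 'myblob-copy')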
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'undelete', - 'timeout': _int_to_str(timeout) - } - - self._perform_request(request) diff --git a/azure/multiapi/storage/v2017_07_29/blob/blockblobservice.py b/azure/multiapi/storage/v2017_07_29/blob/blockblobservice.py deleted file mode 100644 index 075173c..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/blockblobservice.py +++ /dev/null @@ -1,1003 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from io import ( - BytesIO -) -from os import ( - path, -) - -from ..common._common_conversion import ( - _encode_base64, - _to_str, - _int_to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_required, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, - _ERROR_VALUE_SHOULD_BE_STREAM -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_request_body, - _get_data_bytes_only, - _get_data_bytes_or_stream_only, - _add_metadata_headers, -) -from ..common._serialization import ( - _len_plus -) -from ._deserialization import ( - _convert_xml_to_block_list, - _parse_base_properties, -) -from ._encryption import ( - _encrypt_blob, - _generate_blob_encryption_data, -) -from ._serialization import ( - _convert_block_list_to_xml, - _get_path, -) -from ._upload_chunking import ( - _BlockBlobChunkUploader, - _upload_blob_chunks, - _upload_blob_substream_blocks, -) -from .baseblobservice import BaseBlobService -from .models import ( - _BlobTypes, -) - - -class BlockBlobService(BaseBlobService): - ''' - Block blobs let you upload large blobs efficiently. Block blobs are comprised - of blocks, each of which is identified by a block ID. You create or modify a - block blob by writing a set of blocks and committing them by their block IDs. - Each block can be a different size, up to a maximum of 100 MB, and a block blob - can include up to 50,000 blocks. The maximum size of a block blob is therefore - approximately 4.75 TB (100 MB X 50,000 blocks). If you are writing a block - blob that is no more than 64 MB in size, you can upload it in its entirety with - a single write operation; see create_blob_from_bytes. - - :ivar int MAX_SINGLE_PUT_SIZE: - The largest size upload supported in a single put call. This is used by - the create_blob_from_* methods if the content length is known and is less - than this value. - :ivar int MAX_BLOCK_SIZE: - The size of the blocks put by create_blob_from_* methods if the content - length is unknown or is larger than MAX_SINGLE_PUT_SIZE. Smaller blocks - may be put. The maximum block size the service supports is 100MB. - :ivar int MIN_LARGE_BLOCK_UPLOAD_THRESHOLD: - The minimum block size at which the the memory-optimized, block upload - algorithm is considered. 
This algorithm is only applicable to the create_blob_from_file and - create_blob_from_stream methods and will prevent the full buffering of blocks. - In addition to the block size, ContentMD5 validation and Encryption must be disabled as - these options require the blocks to be buffered. - ''' - - MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024 - MAX_BLOCK_SIZE = 4 * 1024 * 1024 - MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 4 * 1024 * 1024 + 1 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - self.blob_type = _BlobTypes.BlockBlob - super(BlockBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout) - - def put_block(self, container_name, blob_name, block, block_id, - validate_content=False, lease_id=None, timeout=None): - ''' - Creates a new block to be committed as part of a blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param block: Content of the block. - :type block: io.IOBase or bytes - Content of the block. - :param str block_id: - A valid Base64 string value that identifies the block. Prior to - encoding, the string must be less than or equal to 64 bytes in size. - For a given blob, the length of the value specified for the blockid - parameter must be the same size for each block. Note that the Base64 - string must be URL-encoded. 
- :param bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - self._put_block( - container_name, - blob_name, - block, - block_id, - validate_content=validate_content, - lease_id=lease_id, - timeout=timeout - ) - - def put_block_list( - self, container_name, blob_name, block_list, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Writes a blob by specifying the list of block IDs that make up the blob. - In order to be written as part of a blob, a block must have been - successfully written to the server in a prior Put Block operation. - - You can call Put Block List to update a blob by uploading only those - blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from - the committed block list or from the uncommitted block list, or to commit - the most recently uploaded version of the block, whichever list it may - belong to. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param block_list: - A list of :class:`~azure.storeage.blob.models.BlobBlock` containing the block ids and block state. - :type block_list: list(:class:`~azure.storage.blob.models.BlobBlock`) - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash of the block list content. The storage - service checks the hash of the block list content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this check is associated with - the block list content, and not with the content of the blob itself. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). 
Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._put_block_list( - container_name, - blob_name, - block_list, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def get_block_list(self, container_name, blob_name, snapshot=None, - block_list_type=None, lease_id=None, timeout=None): - ''' - Retrieves the list of blocks that have been uploaded as part of a - block blob. There are two block lists maintained for a blob: - Committed Block List: - The list of blocks that have been successfully committed to a - given blob with Put Block List. - Uncommitted Block List: - The list of blocks that have been uploaded for a blob using - Put Block, but that have not yet been committed. These blocks - are stored in Azure in association with a blob, but do not yet - form part of the blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - Datetime to determine the time to retrieve the blocks. - :param str block_list_type: - Specifies whether to return the list of committed blocks, the list - of uncommitted blocks, or both lists together. Valid values are: - committed, uncommitted, or all. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: list committed and/or uncommitted blocks for Block Blob - :rtype: :class:`~azure.storage.blob.models.BlobBlockList` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'blocklist', - 'snapshot': _to_str(snapshot), - 'blocklisttype': _to_str(block_list_type), - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _convert_xml_to_block_list) - - # ----Convenience APIs----------------------------------------------------- - - def create_blob_from_path( - self, container_name, blob_name, file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - Creates a new blob from a file path, or updates the content of an - existing blob, with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. 
- :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
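In practice the convenience method documented above is the usual entry point: it performs a single put for small files and switches to a chunked block upload, parallelised by max_connections, once the size exceeds MAX_SINGLE_PUT_SIZE. A hedged sketch, reusing the illustrative `service` client from the earlier examples; the ContentSettings import path mirrors the removed v2017_07_29 package and is an assumption::

    from azure.multiapi.storage.v2017_07_29.blob.models import ContentSettings

    def report_progress(current, total):
        # progress_callback receives bytes transferred so far and the total size
        # (or None when the total is unknown).
        print('uploaded %s of %s bytes' % (current, total))

    service.create_blob_from_path(
        'mycontainer', 'report.csv', '/tmp/report.csv',
        content_settings=ContentSettings(content_type='text/csv'),
        metadata={'source': 'nightly-job'},
        max_connections=4,
        progress_callback=report_progress)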
- :return: ETag and last modified properties for the Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - return self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - def create_blob_from_stream( - self, container_name, blob_name, stream, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None, use_byte_buffer=False): - ''' - Creates a new blob from a file/stream, or updates the content of - an existing blob, with automatic chunking and progress - notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param io.IOBase stream: - Opened file/stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. Note that parallel upload requires the stream to be seekable. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only if
- the resource has not been modified since the specified date/time.
- :param str if_match:
- An ETag value, or the wildcard character (*). Specify this header to perform
- the operation only if the resource's ETag matches the value specified.
- :param str if_none_match:
- An ETag value, or the wildcard character (*). Specify this header
- to perform the operation only if the resource's ETag does not match
- the value specified. Specify the wildcard character (*) to perform
- the operation only if the resource does not exist, and fail the
- operation if it does exist.
- :param int timeout:
- The timeout parameter is expressed in seconds. This method may make
- multiple calls to the Azure service and the timeout will apply to
- each call individually.
- :param bool use_byte_buffer:
- If True, this will force usage of the original full block buffering upload path.
- By default, this value is False and will employ a memory-efficient,
- streaming upload algorithm under the following conditions:
- The provided stream is seekable, 'require_encryption' is False, and
- MAX_BLOCK_SIZE >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD.
- One should consider the drawbacks of using this approach. In order to achieve
- memory-efficiency, an IOBase stream or file-like object is segmented into logical blocks
- using a SubStream wrapper. In order to read the correct data, each SubStream must acquire
- a lock so that it can safely seek to the right position on the shared, underlying stream.
- If max_connections > 1, the concurrency will result in a considerable amount of seeking on
- the underlying stream. For the most common inputs such as a file-like stream object, seeking
- is an inexpensive operation and this is not much of a concern. However, for other variants of streams
- this may not be the case. The trade-off for memory-efficiency must be weighed against the cost of seeking
- with your input stream.
- The SubStream class will attempt to buffer up to 4 MB internally to reduce the number of
- seek and read calls to the underlying stream. This is particularly beneficial when uploading larger blocks.
- :return: ETag and last modified properties for the Block Blob
- :rtype: :class:`~azure.storage.blob.models.ResourceProperties`
- '''
- _validate_not_none('container_name', container_name)
- _validate_not_none('blob_name', blob_name)
- _validate_not_none('stream', stream)
- _validate_encryption_required(self.require_encryption, self.key_encryption_key)
-
- # Adjust count to include padding if we are expected to encrypt.
- adjusted_count = count - if (self.key_encryption_key is not None) and (adjusted_count is not None): - adjusted_count += (16 - (count % 16)) - - # Do single put if the size is smaller than MAX_SINGLE_PUT_SIZE - if adjusted_count is not None and (adjusted_count < self.MAX_SINGLE_PUT_SIZE): - if progress_callback: - progress_callback(0, count) - - data = stream.read(count) - resp = self._put_blob( - container_name=container_name, - blob_name=blob_name, - blob=data, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - if progress_callback: - progress_callback(count, count) - - return resp - else: # Size is larger than MAX_SINGLE_PUT_SIZE, must upload with multiple put_block calls - cek, iv, encryption_data = None, None, None - - use_original_upload_path = use_byte_buffer or validate_content or self.require_encryption or \ - self.MAX_BLOCK_SIZE < self.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if self.key_encryption_key: - cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) - - block_ids = _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_BlockBlobChunkUploader, - timeout=timeout, - content_encryption_key=cek, - initialization_vector=iv - ) - else: - block_ids = _upload_blob_substream_blocks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_BlockBlobChunkUploader, - timeout=timeout, - ) - - return self._put_block_list( - container_name=container_name, - blob_name=blob_name, - block_list=block_ids, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - encryption_data=encryption_data - ) - - def create_blob_from_bytes( - self, container_name, blob_name, blob, index=0, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Creates a new blob from an array of bytes, or updates the content - of an existing blob, with automatic chunking and progress - notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. 
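The branch above chooses between a single Put Blob, the original fully buffered chunk uploader, and the memory-efficient SubStream uploader. A standalone paraphrase of that dispatch predicate, with purely illustrative names, might look like this:

def should_use_full_buffering(stream, use_byte_buffer, validate_content,
                              require_encryption, max_block_size,
                              min_large_block_threshold):
    # Any of these conditions forces the original, fully buffered upload path;
    # otherwise the seekable stream is split into SubStream-backed blocks.
    not_seekable = ((hasattr(stream, 'seekable') and not stream.seekable())
                    or not hasattr(stream, 'seek') or not hasattr(stream, 'tell'))
    return (use_byte_buffer or validate_content or require_encryption
            or max_block_size < min_large_block_threshold or not_seekable)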
- :param ~azure.storage.blob.models.ContentSettings content_settings:
- ContentSettings object used to set blob properties.
- :param metadata:
- Name-value pairs associated with the blob as metadata.
- :type metadata: dict(str, str)
- :param bool validate_content:
- If true, calculates an MD5 hash for each chunk of the blob. The storage
- service checks the hash of the content that has arrived with the hash
- that was sent. This is primarily valuable for detecting bitflips on
- the wire if using http instead of https as https (the default) will
- already validate. Note that this MD5 hash is not stored with the
- blob.
- :param progress_callback:
- Callback for progress with signature function(current, total) where
- current is the number of bytes transferred so far, and total is the
- size of the blob, or None if the total size is unknown.
- :type progress_callback: func(current, total)
- :param int max_connections:
- Maximum number of parallel connections to use when the blob size exceeds
- 64MB.
- :param str lease_id:
- Required if the blob has an active lease.
- :param datetime if_modified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only
- if the resource has been modified since the specified time.
- :param datetime if_unmodified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only if
- the resource has not been modified since the specified date/time.
- :param str if_match:
- An ETag value, or the wildcard character (*). Specify this header to perform
- the operation only if the resource's ETag matches the value specified.
- :param str if_none_match:
- An ETag value, or the wildcard character (*). Specify this header
- to perform the operation only if the resource's ETag does not match
- the value specified. Specify the wildcard character (*) to perform
- the operation only if the resource does not exist, and fail the
- operation if it does exist.
- :param int timeout:
- The timeout parameter is expressed in seconds. This method may make
- multiple calls to the Azure service and the timeout will apply to
- each call individually.
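As a usage sketch of the byte-array variant documented above, reusing the hypothetical `service` client from the earlier sketch: index and count select a slice of the buffer.

data = bytes(bytearray(range(256)) * 16)   # 4 KiB of sample bytes
service.create_blob_from_bytes(
    'mycontainer', 'slice.bin', data,
    index=1024, count=1024)                # uploads bytes 1024..2047 only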
- :return: ETag and last modified properties for the Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_not_none('index', index) - _validate_type_bytes('blob', blob) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - return self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - progress_callback=progress_callback, - max_connections=max_connections, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - use_byte_buffer=True - ) - - def create_blob_from_text( - self, container_name, blob_name, text, encoding='utf-8', - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Creates a new blob from str/unicode, or updates the content of an - existing blob, with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str text: - Text to upload to the blob. - :param str encoding: - Python encoding to use to convert the text to bytes. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
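The text variant described here simply encodes str input to bytes before delegating to create_blob_from_bytes; a rough equivalent with the same hypothetical client:

text = u'hello, \u00e9toile'
payload = text if isinstance(text, bytes) else text.encode('utf-8')
service.create_blob_from_bytes('mycontainer', 'greeting.txt', payload)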
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: ETag and last modified properties for the Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('text', text) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - return self.create_blob_from_bytes( - container_name=container_name, - blob_name=blob_name, - blob=text, - index=0, - count=len(text), - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - def set_standard_blob_tier( - self, container_name, blob_name, standard_blob_tier, timeout=None): - ''' - Sets the block blob tiers on the blob. This API is only supported for block blobs on standard storage accounts. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to update. - :param StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('standard_blob_tier', standard_blob_tier) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'tier', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-access-tier': _to_str(standard_blob_tier) - } - - self._perform_request(request) - - # -----Helper methods------------------------------------ - def _put_blob(self, container_name, blob_name, blob, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Creates a blob or updates an existing blob. - - See create_blob_from_* for high level - functions that handle the creation and upload of large blobs with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. 
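A minimal sketch of the tiering call shown above, assuming a block blob on a standard storage account; container and blob names are placeholders, and `service` is the hypothetical client from the first sketch.

from azure.multiapi.storage.v2017_07_29.blob.models import StandardBlobTier

service.set_standard_blob_tier('mycontainer', 'cold-data.bin',
                               StandardBlobTier.Archive)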
- :param bytes blob: - Content of blob as bytes (size < 64MB). For larger size, you - must call put_block and put_block_list to set content of blob. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param metadata: - Name-value pairs associated with the blob as metadata. - :param bool validate_content: - If true, calculates an MD5 hash of the blob content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. 
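When validate_content is set, the request body is hashed and sent in a Content-MD5 header so the service can verify it on arrival. A standalone illustration of such a header value (base64 of the MD5 digest, not the library's internal helper):

import base64
import hashlib

def content_md5(body):
    # MD5 of the raw body, base64-encoded, as carried in the Content-MD5 header
    return base64.b64encode(hashlib.md5(body).digest()).decode('utf-8')

headers = {'Content-MD5': content_md5(b'example payload')}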
- :return: ETag and last modified properties for the new Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - blob = _get_data_bytes_only('blob', blob) - if self.key_encryption_key: - encryption_data, blob = _encrypt_blob(blob, self.key_encryption_key) - request.headers['x-ms-meta-encryptiondata'] = encryption_data - request.body = blob - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_base_properties) - - def _put_block(self, container_name, blob_name, block, block_id, - validate_content=False, lease_id=None, timeout=None): - ''' - See put_block for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - ''' - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block', block) - _validate_not_none('block_id', block_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'block', - 'blockid': _encode_base64(_to_str(block_id)), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id) - } - request.body = _get_data_bytes_or_stream_only('block', block) - if hasattr(request.body, 'read'): - if _len_plus(request.body) is None: - try: - data = b'' - for chunk in iter(lambda: request.body.read(4096), b""): - data += chunk - request.body = data - except AttributeError: - raise ValueError(_ERROR_VALUE_SHOULD_BE_STREAM.format('request.body')) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - self._perform_request(request) - - def _put_block_list( - self, container_name, blob_name, block_list, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None, encryption_data=None): - ''' - See put_block_list for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - :param str encryption_data: - A JSON formatted string containing the encryption metadata generated for this - blob if it was encrypted all at once upon upload. 
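These helpers back the public put_block / put_block_list operations referenced in their docstrings; a hedged sketch of committing a blob from explicitly staged blocks, reusing the hypothetical `service` client:

from azure.multiapi.storage.v2017_07_29.blob.models import BlobBlock

block_ids = []
for i, chunk in enumerate([b'part-one', b'part-two']):
    block_id = '{0:08d}'.format(i)
    service.put_block('mycontainer', 'assembled.bin', chunk, block_id)
    block_ids.append(BlobBlock(id=block_id))
service.put_block_list('mycontainer', 'assembled.bin', block_ids)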
This should only be passed - in by internal methods. - ''' - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block_list', block_list) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'blocklist', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - request.body = _get_request_body( - _convert_block_list_to_xml(block_list)) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - if encryption_data is not None: - request.headers['x-ms-meta-encryptiondata'] = encryption_data - - return self._perform_request(request, _parse_base_properties) diff --git a/azure/multiapi/storage/v2017_07_29/blob/models.py b/azure/multiapi/storage/v2017_07_29/blob/models.py deleted file mode 100644 index 1f9cd20..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/models.py +++ /dev/null @@ -1,755 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ..common._common_conversion import _to_str - - -class Container(object): - ''' - Blob container class. - - :ivar str name: - The name of the container. - :ivar metadata: - A dict containing name-value pairs associated with the container as metadata. - This var is set to None unless the include=metadata param was included - for the list containers operation. If this parameter was specified but the - container has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict(str, str) - :ivar ContainerProperties properties: - System properties for the container. - ''' - - def __init__(self, name=None, props=None, metadata=None): - self.name = name - self.properties = props or ContainerProperties() - self.metadata = metadata - - -class ContainerProperties(object): - ''' - Blob container's properties class. - - :ivar datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar LeaseProperties lease: - Stores all the lease information for the container. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.lease = LeaseProperties() - self.public_access = None - - -class Blob(object): - ''' - Blob class. - - :ivar str name: - Name of blob. - :ivar str snapshot: - A DateTime value that uniquely identifies the snapshot. The value of - this header indicates the snapshot version, and may be used in - subsequent requests to access the snapshot. - :ivar content: - Blob content. - :vartype content: str or bytes - :ivar BlobProperties properties: - Stores all the system properties for the blob. 
- :ivar metadata: - Name-value pairs associated with the blob as metadata. - :ivar bool deleted: - Specify whether the blob was soft deleted. - In other words, if the blob is being retained by the delete retention policy, - this field would be True. The blob could be undeleted or it will be garbage collected after the specified - time period. - ''' - - def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None, deleted=False): - self.name = name - self.snapshot = snapshot - self.content = content - self.properties = props or BlobProperties() - self.metadata = metadata - self.deleted = deleted - - -class BlobProperties(object): - ''' - Blob Properties - - :ivar str blob_type: - String indicating this blob's type. - :ivar datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int content_length: - The length of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.models.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.models.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.models.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. - :ivar datetime blob_tier_change_time: - Indicates when the access tier was last changed. - :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - ''' - - def __init__(self): - self.blob_type = None - self.last_modified = None - self.etag = None - self.content_length = None - self.content_range = None - self.append_blob_committed_block_count = None - self.page_blob_sequence_number = None - self.server_encrypted = None - self.copy = CopyProperties() - self.content_settings = ContentSettings() - self.lease = LeaseProperties() - self.blob_tier = None - self.blob_tier_change_time = None - self.blob_tier_inferred = False - self.deleted_time = None - self.remaining_retention_days = None - - -class ContentSettings(object): - ''' - Used to store the content settings of a blob. - - :ivar str content_type: - The content type specified for the blob. 
If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :ivar str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :ivar str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :ivar str content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. - ''' - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None): - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.cache_control = cache_control - self.content_md5 = content_md5 - - def _to_headers(self): - return { - 'x-ms-blob-cache-control': _to_str(self.cache_control), - 'x-ms-blob-content-type': _to_str(self.content_type), - 'x-ms-blob-content-disposition': _to_str(self.content_disposition), - 'x-ms-blob-content-md5': _to_str(self.content_md5), - 'x-ms-blob-content-encoding': _to_str(self.content_encoding), - 'x-ms-blob-content-language': _to_str(self.content_language), - } - - -class CopyProperties(object): - ''' - Blob Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. This header does not appear if this blob has never - been the destination in a Copy Blob operation, or if this blob has been - modified after a concluded Copy Blob operation using Set Blob Properties, - Put Blob, or Put Block List. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. This header does not - appear if this blob has never been the destination in a Copy Blob operation, or if - this blob has been modified after a concluded Copy Blob operation using - Set Blob Properties, Put Blob, or Put Block List. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. 
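The ContentSettings values modelled above are typically supplied at upload time; a small sketch, again with the hypothetical `service` client and placeholder names:

from azure.multiapi.storage.v2017_07_29.blob.models import ContentSettings

settings = ContentSettings(content_type='application/json',
                           cache_control='max-age=3600')
service.create_blob_from_text('mycontainer', 'config.json',
                              u'{"enabled": true}',
                              content_settings=settings)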
- ''' - - def __init__(self): - self.id = None - self.source = None - self.status = None - self.progress = None - self.completion_time = None - self.status_description = None - - -class LeaseProperties(object): - ''' - Blob Lease Properties. - - :ivar str status: - The lease status of the blob. - Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. - Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - ''' - - def __init__(self): - self.status = None - self.state = None - self.duration = None - - -class BlobPrefix(object): - ''' - BlobPrefix objects may potentially returned in the blob list when - :func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is - used with a delimiter. Prefixes can be thought of as virtual blob directories. - - :ivar str name: The name of the blob prefix. - ''' - - def __init__(self): - self.name = None - - -class BlobBlockState(object): - '''Block blob block types.''' - - Committed = 'Committed' - '''Committed blocks.''' - - Latest = 'Latest' - '''Latest blocks.''' - - Uncommitted = 'Uncommitted' - '''Uncommitted blocks.''' - - -class BlobBlock(object): - ''' - BlockBlob Block class. - - :ivar str id: - Block id. - :ivar str state: - Block state. - Possible valuse: committed|uncommitted - :ivar int size: - Block size in bytes. - ''' - - def __init__(self, id=None, state=BlobBlockState.Latest): - self.id = id - self.state = state - - def _set_size(self, size): - self.size = size - - -class BlobBlockList(object): - ''' - Blob Block List class. - - :ivar committed_blocks: - List of committed blocks. - :vartype committed_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`) - :ivar uncommitted_blocks: - List of uncommitted blocks. - :vartype uncommitted_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`) - ''' - - def __init__(self): - self.committed_blocks = list() - self.uncommitted_blocks = list() - - -class PageRange(object): - ''' - Page Range for page blob. - - :ivar int start: - Start of page range in bytes. - :ivar int end: - End of page range in bytes. - :ivar bool is_cleared: - Indicates if a page range is cleared or not. Only applicable - for get_page_range_diff API. - ''' - - def __init__(self, start=None, end=None, is_cleared=False): - self.start = start - self.end = end - self.is_cleared = is_cleared - - -class ResourceProperties(object): - ''' - Base response for a resource request. - - :ivar str etag: - Opaque etag value that can be used to check if resource - has been modified. - :ivar datetime last_modified: - Datetime for last time resource was modified. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - - -class AppendBlockProperties(ResourceProperties): - ''' - Response for an append block request. - - :ivar int append_offset: - Position to start next append. - :ivar int committed_block_count: - Number of committed append blocks. - ''' - - def __init__(self): - super(ResourceProperties, self).__init__() - self.append_offset = None - self.committed_block_count = None - - -class PageBlobProperties(ResourceProperties): - ''' - Response for a page request. - - :ivar int sequence_number: - Identifer for page blobs to help handle concurrent writes. 
- ''' - - def __init__(self): - super(ResourceProperties, self).__init__() - self.sequence_number = None - - -class PublicAccess(object): - ''' - Specifies whether data in the container may be accessed publicly and the level of access. - ''' - - OFF = 'off' - ''' - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - ''' - - Blob = 'blob' - ''' - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - ''' - - Container = 'container' - ''' - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - ''' - - -class DeleteSnapshot(object): - ''' - Required if the blob has associated snapshots. Specifies how to handle the snapshots. - ''' - - Include = 'include' - ''' - Delete the base blob and all of its snapshots. - ''' - - Only = 'only' - ''' - Delete only the blob's snapshots and not the blob itself. - ''' - - -class BlockListType(object): - ''' - Specifies whether to return the list of committed blocks, the list of uncommitted - blocks, or both lists together. - ''' - - All = 'all' - '''Both committed and uncommitted blocks.''' - - Committed = 'committed' - '''Committed blocks.''' - - Uncommitted = 'uncommitted' - '''Uncommitted blocks.''' - - -class SequenceNumberAction(object): - '''Sequence number actions.''' - - Increment = 'increment' - ''' - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - ''' - - Max = 'max' - ''' - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - ''' - - Update = 'update' - '''Sets the sequence number to the value included with the request.''' - - -class _LeaseActions(object): - '''Actions for a lease.''' - - Acquire = 'acquire' - '''Acquire the lease.''' - - Break = 'break' - '''Break the lease.''' - - Change = 'change' - '''Change the lease ID.''' - - Release = 'release' - '''Release the lease.''' - - Renew = 'renew' - '''Renew the lease.''' - - -class _BlobTypes(object): - '''Blob type options.''' - - AppendBlob = 'AppendBlob' - '''Append blob type.''' - - BlockBlob = 'BlockBlob' - '''Block blob type.''' - - PageBlob = 'PageBlob' - '''Page blob type.''' - - -class Include(object): - ''' - Specifies the datasets to include in the blob list response. - - :ivar ~azure.storage.blob.models.Include Include.COPY: - Specifies that metadata related to any current or previous Copy Blob operation - should be included in the response. - :ivar ~azure.storage.blob.models.Include Include.METADATA: - Specifies that metadata be returned in the response. - :ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS: - Specifies that snapshots should be included in the enumeration. - :ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS: - Specifies that blobs for which blocks have been uploaded, but which have not - been committed using Put Block List, be included in the response. - :ivar ~azure.storage.blob.models.Include Include.DELETED: - Specifies that deleted blobs should be returned in the response. 
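The Include flags documented above compose with + or | and are passed to list_blobs; a hedged usage sketch with the hypothetical client:

from azure.multiapi.storage.v2017_07_29.blob.models import Include

for blob in service.list_blobs('mycontainer',
                               include=Include.METADATA + Include.SNAPSHOTS):
    print(blob.name, blob.snapshot, blob.metadata)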
- ''' - - def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False, - copy=False, deleted=False, _str=None): - ''' - :param bool snapshots: - Specifies that snapshots should be included in the enumeration. - :param bool metadata: - Specifies that metadata be returned in the response. - :param bool uncommitted_blobs: - Specifies that blobs for which blocks have been uploaded, but which have - not been committed using Put Block List, be included in the response. - :param bool copy: - Specifies that metadata related to any current or previous Copy Blob - operation should be included in the response. - :param bool deleted: - Specifies that deleted blobs should be returned in the response. - :param str _str: - A string representing the includes. - ''' - if not _str: - _str = '' - components = _str.split(',') - self.snapshots = snapshots or ('snapshots' in components) - self.metadata = metadata or ('metadata' in components) - self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components) - self.copy = copy or ('copy' in components) - self.deleted = deleted or ('deleted' in components) - - def __or__(self, other): - return Include(_str=str(self) + str(other)) - - def __add__(self, other): - return Include(_str=str(self) + str(other)) - - def __str__(self): - include = (('snapshots,' if self.snapshots else '') + - ('metadata,' if self.metadata else '') + - ('uncommittedblobs,' if self.uncommitted_blobs else '') + - ('copy,' if self.copy else '') + - ('deleted,' if self.deleted else '')) - return include.rstrip(',') - - -Include.COPY = Include(copy=True) -Include.METADATA = Include(metadata=True) -Include.SNAPSHOTS = Include(snapshots=True) -Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True) -Include.DELETED = Include(deleted=True) - - -class BlobPermissions(object): - ''' - BlobPermissions class to be used with - :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API. - - :ivar BlobPermissions BlobPermissions.ADD: - Add a block to an append blob. - :ivar BlobPermissions BlobPermissions.CREATE: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :ivar BlobPermissions BlobPermissions.DELETE: - Delete the blob. - :ivar BlobPermissions BlobPermissions.READ: - Read the content, properties, metadata and block list. Use the blob as the source of a copy operation. - :ivar BlobPermissions BlobPermissions.WRITE: - Create or write content, properties, metadata, or block list. Snapshot or lease - the blob. Resize the blob (page blob only). Use the blob as the destination of a - copy operation within the same account. - ''' - - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, _str=None): - ''' - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param str _str: - A string representing the permissions. 
- ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.add = add or ('a' in _str) - self.create = create or ('c' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - - def __or__(self, other): - return BlobPermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return BlobPermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - -BlobPermissions.ADD = BlobPermissions(add=True) -BlobPermissions.CREATE = BlobPermissions(create=True) -BlobPermissions.DELETE = BlobPermissions(delete=True) -BlobPermissions.READ = BlobPermissions(read=True) -BlobPermissions.WRITE = BlobPermissions(write=True) - - -class ContainerPermissions(object): - ''' - ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature` - API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`. - - :ivar ContainerPermissions ContainerPermissions.DELETE: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :ivar ContainerPermissions ContainerPermissions.LIST: - List blobs in the container. - :ivar ContainerPermissions ContainerPermissions.READ: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :ivar ContainerPermissions ContainerPermissions.WRITE: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - ''' - - def __init__(self, read=False, write=False, delete=False, list=False, - _str=None): - ''' - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool list: - List blobs in the container. - :param str _str: - A string representing the permissions. 
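BlobPermissions (and the container-level permissions below) feed the shared access signature helpers; a sketch of a one-hour read-only blob SAS, with the expiry handling purely illustrative and `service` the hypothetical client from earlier:

from datetime import datetime, timedelta
from azure.multiapi.storage.v2017_07_29.blob.models import BlobPermissions

sas = service.generate_blob_shared_access_signature(
    'mycontainer', 'report.csv',
    permission=BlobPermissions.READ,
    expiry=datetime.utcnow() + timedelta(hours=1))
url = service.make_blob_url('mycontainer', 'report.csv', sas_token=sas)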
- ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - - def __or__(self, other): - return ContainerPermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return ContainerPermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - -ContainerPermissions.DELETE = ContainerPermissions(delete=True) -ContainerPermissions.LIST = ContainerPermissions(list=True) -ContainerPermissions.READ = ContainerPermissions(read=True) -ContainerPermissions.WRITE = ContainerPermissions(write=True) - - -class PremiumPageBlobTier(object): - ''' - Specifies the page blob tier to set the blob to. This is only applicable to page - blobs on premium storage accounts. - Please take a look at https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughtput per PageBlobTier. - ''' - - P4 = 'P4' - ''' P4 Tier ''' - - P6 = 'P6' - ''' P6 Tier ''' - - P10 = 'P10' - ''' P10 Tier ''' - - P20 = 'P20' - ''' P20 Tier ''' - - P30 = 'P30' - ''' P30 Tier ''' - - P40 = 'P40' - ''' P40 Tier ''' - - P50 = 'P50' - ''' P50 Tier ''' - - P60 = 'P60' - ''' P60 Tier ''' - - -class StandardBlobTier(object): - ''' - Specifies the blob tier to set the blob to. This is only applicable for block blobs on standard storage accounts. - ''' - - Archive = 'Archive' - ''' Archive ''' - - Cool = 'Cool' - ''' Cool ''' - - Hot = 'Hot' - ''' Hot ''' diff --git a/azure/multiapi/storage/v2017_07_29/blob/pageblobservice.py b/azure/multiapi/storage/v2017_07_29/blob/pageblobservice.py deleted file mode 100644 index 28c5ea9..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/pageblobservice.py +++ /dev/null @@ -1,1388 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -import sys -from os import path - -from ..common._common_conversion import ( - _int_to_str, - _to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_required, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_data_bytes_only, - _add_metadata_headers, -) -from ._deserialization import ( - _convert_xml_to_page_ranges, - _parse_page_properties, - _parse_base_properties, -) -from ._encryption import _generate_blob_encryption_data -from ._error import ( - _ERROR_PAGE_BLOB_SIZE_ALIGNMENT, -) -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from ._upload_chunking import ( - _PageBlobChunkUploader, - _upload_blob_chunks, -) -from .baseblobservice import BaseBlobService -from .models import ( - _BlobTypes, - ResourceProperties) - -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - -# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT -_PAGE_ALIGNMENT = 512 - - -class PageBlobService(BaseBlobService): - ''' - Page blobs are a collection of 512-byte pages optimized for random read and - write operations. To create a page blob, you initialize the page blob and - specify the maximum size the page blob will grow. To add or update the - contents of a page blob, you write a page or pages by specifying an offset - and a range that align to 512-byte page boundaries. A write to a page blob - can overwrite just one page, some pages, or up to 4 MB of the page blob. - Writes to page blobs happen in-place and are immediately committed to the - blob. The maximum size for a page blob is 8 TB. - - :ivar int MAX_PAGE_SIZE: - The size of the pages put by create_blob_from_* methods. Smaller pages - may be put if there is less data provided. The maximum page size the service - supports is 4MB. When using the create_blob_from_* methods, empty pages are skipped. - ''' - - MAX_PAGE_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. 
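A minimal construction sketch for the page blob client described above, assuming the versioned package also exports PageBlobService; the content_length must be 512-byte aligned, and all names are placeholders.

from azure.multiapi.storage.v2017_07_29.blob import PageBlobService

page_service = PageBlobService(account_name='myaccount', account_key='<key>')
page_service.create_blob('vhds', 'disk.vhd', content_length=512 * 1024)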
- :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - self.blob_type = _BlobTypes.PageBlob - super(PageBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout) - - def create_blob( - self, container_name, blob_name, content_length, content_settings=None, - sequence_number=None, metadata=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): - ''' - Creates a new Page Blob. - - See create_blob_from_* for high level functions that handle the - creation and upload of large blobs with automatic chunking and - progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param int content_length: - Required. This header specifies the maximum size - for the page blob, up to 1 TB. The page blob size must be aligned - to a 512-byte boundary. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param int sequence_number: - The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. 
Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :param PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :return: ETag and last modified properties for the new Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._create_blob( - container_name, - blob_name, - content_length, - content_settings=content_settings, - sequence_number=sequence_number, - metadata=metadata, - lease_id=lease_id, - premium_page_blob_tier=premium_page_blob_tier, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def incremental_copy_blob(self, container_name, blob_name, copy_source, - metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None, - destination_if_match=None, destination_if_none_match=None, destination_lease_id=None, - source_lease_id=None, timeout=None): - ''' - Copies an incremental copy of a blob asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The Blob service copies blobs on a best-effort basis. - - The source blob for an incremental copy operation must be a page blob. - Call get_blob_properties on the destination blob to check the status of the copy operation. - The final blob will be committed when the copy completes. - - :param str container_name: - Name of the destination container. The container must exist. - :param str blob_name: - Name of the destination blob. If the destination blob exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure page blob. - The value should be URL-encoded as it would appear in a request URI. - The copy source must be a snapshot and include a valid SAS token or be public. - Example: - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=&sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str). - :param datetime destination_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :param datetime destination_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
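A hedged sketch of the incremental copy described above, reusing the hypothetical page blob client; the source URL must point at a page blob snapshot, and the SAS token shown is a placeholder.

copy_props = page_service.incremental_copy_blob(
    'backups', 'disk-incremental.vhd',
    'https://source.blob.core.windows.net/vhds/disk.vhd'
    '?snapshot=2017-07-29T00:00:00.0000000Z&<sas-token>')
print(copy_props.status, copy_props.id)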
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the destination blob - has not been modified since the specified date/time. If the destination blob - has been modified, the Blob service returns status code 412 (Precondition Failed). - :param ETag destination_if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - matches the ETag value for an existing destination blob. If the ETag for - the destination blob does not match the ETag specified for If-Match, the - Blob service returns status code 412 (Precondition Failed). - :param ETag destination_if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - does not match the ETag value for the destination blob. Specify the wildcard - character (*) to perform the operation only if the destination blob does not - exist. If the specified condition isn't met, the Blob service returns status - code 412 (Precondition Failed). - :param str destination_lease_id: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :param str source_lease_id: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.blob.models.CopyProperties` - ''' - return self._copy_blob(container_name, blob_name, copy_source, - metadata, - source_if_modified_since=None, source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=destination_if_modified_since, - destination_if_unmodified_since=destination_if_unmodified_since, - destination_if_match=destination_if_match, - destination_if_none_match=destination_if_none_match, - destination_lease_id=destination_lease_id, - source_lease_id=source_lease_id, timeout=timeout, - incremental_copy=True) - - def update_page( - self, container_name, blob_name, page, start_range, end_range, - validate_content=False, lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Updates a range of pages. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param bytes page: - Content of the page. - :param int start_range: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param int end_range: - End of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param bool validate_content: - If true, calculates an MD5 hash of the page content.
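To illustrate the incremental_copy_blob API removed above: the source must be a page blob snapshot reachable via SAS or public access, and the copy is asynchronous, so the docstring's advice to poll get_blob_properties applies. The URL, snapshot value, and SAS token below are placeholders::

    # Hypothetical incremental copy; 'service' is the client from the first sketch.
    source = ('https://myaccount.blob.core.windows.net/mycontainer/disk.vhd'
              '?snapshot=2017-10-31T19:06:45.9356144Z&<sas-token>')
    copy = service.incremental_copy_blob('backups', 'disk-incremental', source)
    print(copy.status)  # poll get_blob_properties on the destination for completion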
The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :param int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :param int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value matches the - value specified. If the values do not match, the Blob service fails. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value does not - match the value specified. If the values are identical, the Blob - service fails. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._update_page( - container_name, - blob_name, - page, - start_range, - end_range, - validate_content=validate_content, - lease_id=lease_id, - if_sequence_number_lte=if_sequence_number_lte, - if_sequence_number_lt=if_sequence_number_lt, - if_sequence_number_eq=if_sequence_number_eq, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def clear_page( - self, container_name, blob_name, start_range, end_range, - lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Clears a range of pages. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int start_range: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. 
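A minimal write against the update_page API removed above; the 512-byte alignment rules quoted in the docstring are what make the 0-511 range below valid::

    # 'service' is the PageBlobService client from the first sketch above.
    page = b'\x01' * 512  # payload length must match the inclusive range length
    service.update_page('mycontainer', 'disk.vhd', page,
                        start_range=0, end_range=511)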
Examples of valid byte ranges are 0-511, 512-1023, etc. - :param int end_range: - End of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param str lease_id: - Required if the blob has an active lease. - :param int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :param int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :param int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value matches the - value specified. If the values do not match, the Blob service fails. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value does not - match the value specified. If the values are identical, the Blob - service fails. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'page', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-page-write': 'clear', - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), - 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), - 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - align_to_page=True) - - return self._perform_request(request, _parse_page_properties) - - def get_page_ranges( - self, container_name, blob_name, snapshot=None, start_range=None, - end_range=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve information - from. - :param int start_range: - Start of byte range to use for getting valid page ranges. - If no end_range is given, all bytes after the start_range will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param int end_range: - End of byte range to use for getting valid page ranges. - If end_range is given, start_range must be provided. - This range will return valid page ranges for from the offset start up to - offset end. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. 
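The clear_page call removed above follows the same alignment rules; a sketch clearing the first page::

    # 'service' is the client from the first sketch; range bounds are inclusive.
    service.clear_page('mycontainer', 'disk.vhd', start_range=0, end_range=511)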
- :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A list of valid Page Ranges for the Page Blob. - :rtype: list(:class:`~azure.storage.blob.models.PageRange`) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'pagelist', - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - if start_range is not None: - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - align_to_page=True) - - return self._perform_request(request, _convert_xml_to_page_ranges) - - def get_page_ranges_diff( - self, container_name, blob_name, previous_snapshot, snapshot=None, - start_range=None, end_range=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - The response will include only the pages that are different between either a - recent snapshot or the current blob and a previous snapshot, including pages - that were cleared. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str previous_snapshot: - The snapshot parameter is an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that - specifies a more recent blob snapshot to be compared - against a previous snapshot (previous_snapshot). - :param int start_range: - Start of byte range to use for getting different page ranges. - If no end_range is given, all bytes after the start_range will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param int end_range: - End of byte range to use for getting different page ranges. - If end_range is given, start_range must be provided. - This range will return valid page ranges for from the offset start up to - offset end. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
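A sketch of listing valid ranges with the get_page_ranges API removed above; the start and end fields used below are assumed from the legacy PageRange model::

    # 'service' is the client from the first sketch above.
    for page_range in service.get_page_ranges('mycontainer', 'disk.vhd'):
        print(page_range.start, page_range.end)  # byte offsets of a valid range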
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A list of different Page Ranges for the Page Blob. - :rtype: list(:class:`~azure.storage.blob.models.PageRange`) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('previous_snapshot', previous_snapshot) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'pagelist', - 'snapshot': _to_str(snapshot), - 'prevsnapshot': _to_str(previous_snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - if start_range is not None: - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - align_to_page=True) - - return self._perform_request(request, _convert_xml_to_page_ranges) - - def set_sequence_number( - self, container_name, blob_name, sequence_number_action, sequence_number=None, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - - ''' - Sets the blob sequence number. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.models.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('sequence_number_action', sequence_number_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-sequence-number': _to_str(sequence_number), - 'x-ms-sequence-number-action': _to_str(sequence_number_action), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_page_properties) - - def resize_blob( - self, container_name, blob_name, content_length, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - - ''' - Resizes a page blob to the specified size. If the specified value is less - than the current size of the blob, then all pages above the specified value - are cleared. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int content_length: - Size to resize blob to. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). 
Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-content-length': _to_str(content_length), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_page_properties) - - # ----Convenience APIs----------------------------------------------------- - - def create_blob_from_path( - self, container_name, blob_name, file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, max_connections=2, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): - ''' - Creates a new blob from a file path, or updates the content of an - existing blob, with automatic chunking and progress notifications. - Empty chunks are skipped, while non-emtpy ones(even if only partly filled) are uploaded. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
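The resize_blob and set_sequence_number calls removed above can be sketched together; the new size must remain 512-byte aligned, and 'update' merely stands in for one of the SequenceNumberAction values, so treat it as a placeholder::

    # 'service' is the client from the first sketch above.
    service.resize_blob('mycontainer', 'disk.vhd', content_length=2 * 1024 * 1024)
    service.set_sequence_number('mycontainer', 'disk.vhd',
                                'update', sequence_number=7)  # placeholder action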
- :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :return: ETag and last modified properties for the Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - return self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - progress_callback=progress_callback, - max_connections=max_connections, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - premium_page_blob_tier=premium_page_blob_tier) - - def create_blob_from_stream( - self, container_name, blob_name, stream, count, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, - premium_page_blob_tier=None): - ''' - Creates a new blob from a file/stream, or updates the content of an - existing blob, with automatic chunking and progress notifications. - Empty chunks are skipped, while non-emtpy ones(even if only partly filled) are uploaded. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param io.IOBase stream: - Opened file/stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is required, a page - blob cannot be created if the count is unknown. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set the blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. 
This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transferred so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. Note that parallel upload - requires the stream to be seekable. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts.
- :return: ETag and last modified properties for the Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - _validate_not_none('count', count) - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - if count < 0: - raise ValueError(_ERROR_VALUE_NEGATIVE.format('count')) - - if count % _PAGE_ALIGNMENT != 0: - raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count)) - - cek, iv, encryption_data = None, None, None - if self.key_encryption_key is not None: - cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) - - response = self._create_blob( - container_name=container_name, - blob_name=blob_name, - content_length=count, - content_settings=content_settings, - metadata=metadata, - lease_id=lease_id, - premium_page_blob_tier=premium_page_blob_tier, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - encryption_data=encryption_data - ) - - if count == 0: - return response - - # _upload_blob_chunks returns the block ids for block blobs so resource_properties - # is passed as a parameter to get the last_modified and etag for page and append blobs. - # this info is not needed for block_blobs since _put_block_list is called after which gets this info - resource_properties = ResourceProperties() - _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_PAGE_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_PageBlobChunkUploader, - if_match=response.etag, - timeout=timeout, - content_encryption_key=cek, - initialization_vector=iv, - resource_properties=resource_properties - ) - - return resource_properties - - def create_blob_from_bytes( - self, container_name, blob_name, blob, index=0, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None, premium_page_blob_tier=None): - ''' - Creates a new blob from an array of bytes, or updates the content - of an existing blob, with automatic chunking and progress - notifications. Empty chunks are skipped, while non-emtpy ones(even if only partly filled) are uploaded. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the byte array. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. 
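A sketch of the convenience uploaders removed above; the local path is a placeholder, the source file must already be a multiple of 512 bytes, and the callback signature matches the docstring::

    # 'service' is the client from the first sketch above.
    def report_progress(current, total):
        print('uploaded {0} of {1} bytes'.format(current, total))

    service.create_blob_from_path(
        'mycontainer', 'disk.vhd', '/tmp/disk.vhd',
        max_connections=4, progress_callback=report_progress)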
This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :return: ETag and last modified properties for the Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_type_bytes('blob', blob) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - return self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - premium_page_blob_tier=premium_page_blob_tier) - - def set_premium_page_blob_tier( - self, container_name, blob_name, premium_page_blob_tier, - timeout=None): - ''' - Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. 
- - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to update. - :param PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('premium_page_blob_tier', premium_page_blob_tier) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'tier', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-access-tier': _to_str(premium_page_blob_tier) - } - - self._perform_request(request) - - def copy_blob(self, container_name, blob_name, copy_source, - metadata=None, - source_if_modified_since=None, - source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=None, - destination_if_unmodified_since=None, - destination_if_match=None, - destination_if_none_match=None, - destination_lease_id=None, - source_lease_id=None, timeout=None, - premium_page_blob_tier=None): - ''' - Copies a blob asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation must be a page blob. If the destination - blob already exists, it must be of the same blob type as the source blob. - Any existing destination blob will be overwritten. - The destination blob cannot be modified while a copy operation is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - If the tier on the source blob is larger than the tier being passed to this - copy operation or if the size of the blob exceeds the tier being passed to - this copy operation then the operation will fail. - - You can call get_blob_properties on the destination - blob to check the status of the copy operation. The final blob will be - committed when the copy completes. - - :param str container_name: - Name of the destination container. The container must exist. - :param str blob_name: - Name of the destination blob. If the destination blob exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. 
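For the set_premium_page_blob_tier API removed above, a one-line sketch follows; 'P10' is only a stand-in for a PremiumPageBlobTier value and a premium storage account is assumed::

    # 'service' is the client from the first sketch above; the tier is a placeholder.
    service.set_premium_page_blob_tier('mycontainer', 'disk.vhd', 'P10')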
If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str). - :param datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :param datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :param ETag source_if_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the source blob only if its ETag matches the value - specified. If the ETag values do not match, the Blob service returns - status code 412 (Precondition Failed). This header cannot be specified - if the source is an Azure File. - :param ETag source_if_none_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the blob only if its ETag does not match the value - specified. If the values are identical, the Blob service returns status - code 412 (Precondition Failed). This header cannot be specified if the - source is an Azure File. - :param datetime destination_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :param datetime destination_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :param ETag destination_if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - matches the ETag value for an existing destination blob. If the ETag for - the destination blob does not match the ETag specified for If-Match, the - Blob service returns status code 412 (Precondition Failed). - :param ETag destination_if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - does not match the ETag value for the destination blob. 
Specify the wildcard - character (*) to perform the operation only if the destination blob does not - exist. If the specified condition isn't met, the Blob service returns status - code 412 (Precondition Failed). - :param str destination_lease_id: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :param str source_lease_id: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :param int timeout: - The timeout parameter is expressed in seconds. - :param PageBlobTier premium_page_blob_tier: - A page blob tier value to set on the destination blob. The tier correlates to - the size of the blob and number of allowed IOPS. This is only applicable to - page blobs on premium storage accounts. - If the tier on the source blob is larger than the tier being passed to this - copy operation or if the size of the blob exceeds the tier being passed to - this copy operation then the operation will fail. - :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.blob.models.CopyProperties` - ''' - return self._copy_blob(container_name, blob_name, copy_source, - metadata, premium_page_blob_tier, - source_if_modified_since, source_if_unmodified_since, - source_if_match, source_if_none_match, - destination_if_modified_since, - destination_if_unmodified_since, - destination_if_match, - destination_if_none_match, - destination_lease_id, - source_lease_id, timeout, - False) - - # -----Helper methods----------------------------------------------------- - - def _create_blob( - self, container_name, blob_name, content_length, content_settings=None, - sequence_number=None, metadata=None, lease_id=None, premium_page_blob_tier=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, - encryption_data=None): - ''' - See create_blob for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - :param str encryption_data: - The JSON formatted encryption metadata to upload as a part of the blob. - This should only be passed internally from other methods and only applied - when uploading entire blob contents immediately follows creation of the blob. 
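A sketch of the page-blob copy_blob API removed above; the source URL and destination tier are placeholders, and the copy runs asynchronously on the service side::

    # 'service' is the client from the first sketch above.
    copy = service.copy_blob(
        'mycontainer', 'disk-copy.vhd',
        'https://myaccount.blob.core.windows.net/mycontainer/disk.vhd',
        premium_page_blob_tier='P10')  # placeholder tier value
    print(copy.status)  # poll get_blob_properties until the copy completes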
- ''' - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-blob-content-length': _to_str(content_length), - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-blob-sequence-number': _to_str(sequence_number), - 'x-ms-access-tier': _to_str(premium_page_blob_tier), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - if encryption_data is not None: - request.headers['x-ms-meta-encryptiondata'] = encryption_data - - return self._perform_request(request, _parse_base_properties) - - def _update_page( - self, container_name, blob_name, page, start_range, end_range, - validate_content=False, lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - See update_page for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - ''' - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'page', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-page-write': 'update', - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), - 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), - 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - align_to_page=True) - request.body = _get_data_bytes_only('page', page) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_page_properties) diff --git a/azure/multiapi/storage/v2017_07_29/blob/sharedaccesssignature.py b/azure/multiapi/storage/v2017_07_29/blob/sharedaccesssignature.py deleted file mode 100644 index f44d44b..0000000 --- a/azure/multiapi/storage/v2017_07_29/blob/sharedaccesssignature.py +++ /dev/null @@ -1,179 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from ..common.sharedaccesssignature import ( - SharedAccessSignature, - _SharedAccessHelper, -) -from ._constants import X_MS_VERSION - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_blob(self, container_name, blob_name, permission=None, - expiry=None, start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the blob. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param BlobPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. 
- :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = container_name + '/' + blob_name - - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource('b') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. 
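A sketch of issuing a blob-level token with the BlobSharedAccessSignature.generate_blob API removed above; the account key is a placeholder, the import path assumes the removed module layout, and the raw 'r' permission string stands in for a BlobPermissions value::

    # Hypothetical SAS generation against the removed legacy class.
    from datetime import datetime, timedelta

    from azure.multiapi.storage.v2017_07_29.blob.sharedaccesssignature import (
        BlobSharedAccessSignature,
    )

    sas = BlobSharedAccessSignature('myaccount', '<account-key>')
    token = sas.generate_blob(
        'mycontainer', 'disk.vhd',
        permission='r',  # read-only; placeholder for a BlobPermissions value
        expiry=datetime.utcnow() + timedelta(hours=1),
        protocol='https')
    # The token is then passed as sas_token to a blob service client.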
- :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name) - - return sas.get_token() diff --git a/azure/multiapi/storage/v2017_07_29/common/__init__.py b/azure/multiapi/storage/v2017_07_29/common/__init__.py deleted file mode 100644 index 5655b1b..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ._constants import ( - __author__, - __version__, - DEFAULT_X_MS_VERSION, -) -from .cloudstorageaccount import CloudStorageAccount -from .models import ( - RetentionPolicy, - Logging, - Metrics, - CorsRule, - DeleteRetentionPolicy, - ServiceProperties, - AccessPolicy, - ResourceTypes, - Services, - AccountPermissions, - Protocol, - ServiceStats, - GeoReplication, - LocationMode, - RetryContext, -) -from .retry import ( - ExponentialRetry, - LinearRetry, - no_retry, -) -from .sharedaccesssignature import ( - SharedAccessSignature, -) diff --git a/azure/multiapi/storage/v2017_07_29/common/_auth.py b/azure/multiapi/storage/v2017_07_29/common/_auth.py deleted file mode 100644 index 318e620..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_auth.py +++ /dev/null @@ -1,117 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
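For reference, a minimal usage sketch of the BlobSharedAccessSignature factory deleted above. The import paths, account credentials, and container/blob names are placeholders and assume the v2017_07_29 tree that this change removes is still importable; the permission classes mirror the azure-storage 1.x models exported alongside it::

    from datetime import datetime, timedelta

    # Assumed module paths within the removed v2017_07_29 copy.
    from azure.multiapi.storage.v2017_07_29.blob.sharedaccesssignature import (
        BlobSharedAccessSignature,
    )
    from azure.multiapi.storage.v2017_07_29.blob.models import (
        BlobPermissions,
        ContainerPermissions,
    )

    sas_factory = BlobSharedAccessSignature('myaccount', 'bXlrZXk=')  # placeholder credentials

    # Token granting read access to a single blob for one hour.
    blob_token = sas_factory.generate_blob(
        container_name='mycontainer',
        blob_name='data.csv',
        permission=BlobPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # Token granting list access to the whole container.
    container_token = sas_factory.generate_container(
        container_name='mycontainer',
        permission=ContainerPermissions.LIST,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

As the class docstring notes, the same tokens can also be produced through the generate_*_shared_access_signature methods on the service objects, which delegate to this factory.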
-# -------------------------------------------------------------------------- -from ._common_conversion import ( - _sign_string, -) -from ._constants import ( - DEV_ACCOUNT_NAME, - DEV_ACCOUNT_SECONDARY_NAME -) - -import logging -logger = logging.getLogger(__name__) - - -class _StorageSharedKeyAuthentication(object): - def __init__(self, account_name, account_key, is_emulated=False): - self.account_name = account_name - self.account_key = account_key - self.is_emulated = is_emulated - - def _get_headers(self, request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - def _get_verb(self, request): - return request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = request.path.split('?')[0] - - # for emulator, use the DEV_ACCOUNT_NAME instead of DEV_ACCOUNT_SECONDARY_NAME - # as this is how the emulator works - if self.is_emulated and uri_path.find(DEV_ACCOUNT_SECONDARY_NAME) == 1: - # only replace the first instance - uri_path = uri_path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1) - - return '/' + self.account_name + uri_path - - def _get_canonicalized_headers(self, request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - signature = _sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.headers['Authorization'] = auth_string - - -class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication): - def sign_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - logger.debug("String_to_sign=%s", string_to_sign) - - def _get_canonicalized_resource_query(self, request): - sorted_queries = [(name, value) for name, value in request.query.items()] - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value: - string_to_sign += '\n' + name.lower() + ':' + value - - return string_to_sign - - -class _StorageNoAuthentication(object): - def sign_request(self, request): - pass - - -class _StorageSASAuthentication(object): - def __init__(self, sas_token): - # ignore ?-prefix (added by tools such as Azure Portal) on sas tokens - # doing so avoids double question marks when signing - if sas_token[0] == '?': - self.sas_token = sas_token[1:] - else: - self.sas_token = sas_token - - def sign_request(self, request): - # if 'sig=' is present, then the request has already been signed - # as is the case when performing retries - if 'sig=' in request.path: - return - if '?' 
in request.path: - request.path += '&' - else: - request.path += '?' - - request.path += self.sas_token diff --git a/azure/multiapi/storage/v2017_07_29/common/_common_conversion.py b/azure/multiapi/storage/v2017_07_29/common/_common_conversion.py deleted file mode 100644 index 8b50afb..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_common_conversion.py +++ /dev/null @@ -1,126 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac -import sys -from io import (SEEK_SET) - -from dateutil.tz import tzutc - -from ._error import ( - _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, - _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM, -) -from .models import ( - _unicode_type, -) - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_str(value): - return _str(value) if value is not None else None - - -def _int_to_str(value): - return str(int(value)) if value is not None else None - - -def _bool_to_str(value): - if value is None: - return None - - if isinstance(value, bool): - if value: - return 'true' - else: - return 'false' - - return str(value) - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') - - -def _datetime_to_utc_string(value): - # Azure expects the date value passed in to be UTC. - # Azure will always return values as UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - if value is None: - return None - - if value.tzinfo: - value = value.astimezone(tzutc()) - - return value.strftime('%a, %d %b %Y %H:%M:%S GMT') - - -def _encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def _decode_base64_to_bytes(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def _decode_base64_to_text(data): - decoded_bytes = _decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def _sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = _decode_base64_to_bytes(key) - else: - if isinstance(key, _unicode_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, _unicode_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = _encode_base64(digest) - return encoded_digest - - -def _get_content_md5(data): - md5 = hashlib.md5() - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data')) - else: - raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data')) - - return base64.b64encode(md5.digest()).decode('utf-8') - - -def _lower(text): - return text.lower() diff --git a/azure/multiapi/storage/v2017_07_29/common/_connection.py b/azure/multiapi/storage/v2017_07_29/common/_connection.py deleted file mode 
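The Shared Key scheme deleted above boils down to an HMAC-SHA256 over a newline-delimited string-to-sign, keyed with the base64-decoded account key. A self-contained sketch of that math, with a made-up account, key, and request::

    import base64
    import hashlib
    import hmac

    def sign_string(account_key_b64, string_to_sign):
        # Mirrors _sign_string: decode the key, HMAC-SHA256 the UTF-8 payload,
        # and base64-encode the digest.
        key = base64.b64decode(account_key_b64)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    # Verb, then the 11 whitelisted standard headers (all empty for this bare
    # GET), then the sorted x-ms-* headers, then the canonicalized resource;
    # see sign_request above for the exact ordering.
    string_to_sign = (
        'GET\n'
        + '\n' * 11
        + 'x-ms-date:Mon, 01 Jan 2018 00:00:00 GMT\n'
        + 'x-ms-version:2017-07-29\n'
        + '/myaccount/mycontainer/data.csv'
    )

    signature = sign_string('bXlrZXk=', string_to_sign)
    authorization_header = 'SharedKey myaccount:' + signature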
100644 index 990f8e9..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_connection.py +++ /dev/null @@ -1,155 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys - -if sys.version_info >= (3,): - from urllib.parse import urlparse -else: - from urlparse import urlparse - -from ._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, - DEV_ACCOUNT_NAME, - DEV_ACCOUNT_SECONDARY_NAME, - DEV_ACCOUNT_KEY, - DEV_BLOB_HOST, - DEV_QUEUE_HOST, -) -from ._error import ( - _ERROR_STORAGE_MISSING_INFO, -) - -_EMULATOR_ENDPOINTS = { - 'blob': DEV_BLOB_HOST, - 'queue': DEV_QUEUE_HOST, - 'file': '', -} - -_CONNECTION_ENDPOINTS = { - 'blob': 'BlobEndpoint', - 'queue': 'QueueEndpoint', - 'file': 'FileEndpoint', -} - -_CONNECTION_ENDPOINTS_SECONDARY = { - 'blob': 'BlobSecondaryEndpoint', - 'queue': 'QueueSecondaryEndpoint', - 'file': 'FileSecondaryEndpoint', -} - -class _ServiceParameters(object): - def __init__(self, service, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, custom_domain_secondary=None): - - self.account_name = account_name - self.account_key = account_key - self.sas_token = sas_token - self.protocol = protocol or DEFAULT_PROTOCOL - self.is_emulated = is_emulated - - if is_emulated: - self.account_name = DEV_ACCOUNT_NAME - self.protocol = 'http' - - # Only set the account key if a sas_token is not present to allow sas to be used with the emulator - self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None - - self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_NAME) - self.secondary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_SECONDARY_NAME) - else: - # Strip whitespace from the key - if self.account_key: - self.account_key = self.account_key.strip() - - endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE - - # Setup the primary endpoint - if custom_domain: - parsed_url = urlparse(custom_domain) - - # Trim any trailing slashes from the path - path = parsed_url.path.rstrip('/') - - self.primary_endpoint = parsed_url.netloc + path - self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme - else: - if not self.account_name: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix) - - # Setup the secondary endpoint - if custom_domain_secondary: - if not custom_domain: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - parsed_url = urlparse(custom_domain_secondary) - - # Trim any trailing slashes from the path - path = parsed_url.path.rstrip('/') - - self.secondary_endpoint = parsed_url.netloc + path - else: - if self.account_name: - self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix) - else: - self.secondary_endpoint = None - - @staticmethod - def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, is_emulated=None, - protocol=None, endpoint_suffix=None, custom_domain=None, request_session=None, - connection_string=None, socket_timeout=None): - if connection_string: - params = _ServiceParameters._from_connection_string(connection_string, service) - elif 
is_emulated: - params = _ServiceParameters(service, is_emulated=True) - elif account_name: - params = _ServiceParameters(service, - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=custom_domain) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - params.request_session = request_session - params.socket_timeout = socket_timeout - return params - - @staticmethod - def _from_connection_string(connection_string, service): - # Split into key=value pairs removing empties, then split the pairs into a dict - config = dict(s.split('=', 1) for s in connection_string.split(';') if s) - - # Authentication - account_name = config.get('AccountName') - account_key = config.get('AccountKey') - sas_token = config.get('SharedAccessSignature') - - # Emulator - is_emulated = config.get('UseDevelopmentStorage') - - # Basic URL Configuration - protocol = config.get('DefaultEndpointsProtocol') - endpoint_suffix = config.get('EndpointSuffix') - - # Custom URLs - endpoint = config.get(_CONNECTION_ENDPOINTS[service]) - endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service]) - - return _ServiceParameters(service, - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=endpoint, - custom_domain_secondary=endpoint_secondary) diff --git a/azure/multiapi/storage/v2017_07_29/common/_constants.py b/azure/multiapi/storage/v2017_07_29/common/_constants.py deleted file mode 100644 index fe9f874..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_constants.py +++ /dev/null @@ -1,39 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import platform - -__author__ = 'Microsoft Corp. 
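The connection-string handling removed above is a simple key/value split: pairs are separated by ';' and split on the first '=' only, so base64 account keys that end in '=' padding survive. A short sketch with a placeholder (non-secret) connection string::

    conn_str = (
        'DefaultEndpointsProtocol=https;'
        'AccountName=myaccount;'
        'AccountKey=bXlrZXk=;'
        'EndpointSuffix=core.windows.net'
    )

    # Split on ';' into 'Key=Value' pairs, then split each pair once on '='.
    config = dict(s.split('=', 1) for s in conn_str.split(';') if s)

    account_name = config.get('AccountName')        # 'myaccount'
    account_key = config.get('AccountKey')          # 'bXlrZXk=' (padding intact)
    endpoint_suffix = config.get('EndpointSuffix')  # 'core.windows.net'

    # The primary endpoint is then assembled as '{account}.{service}.{suffix}'.
    primary_endpoint = '{}.{}.{}'.format(account_name, 'blob', endpoint_suffix)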
' -__version__ = '1.1.0' - -# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)' -# First version(0.37.0) is the common package, and the second version(0.38.0) is the service package -USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__) -USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(), - platform.python_version(), platform.system(), - platform.release()) - -# default values for common package, in case it is used directly -DEFAULT_X_MS_VERSION = '2017-07-29' -DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX) - -# Live ServiceClient URLs -SERVICE_HOST_BASE = 'core.windows.net' -DEFAULT_PROTOCOL = 'https' - -# Development ServiceClient URLs -DEV_BLOB_HOST = '127.0.0.1:10000' -DEV_QUEUE_HOST = '127.0.0.1:10001' - -# Default credentials for Development Storage Service -DEV_ACCOUNT_NAME = 'devstoreaccount1' -DEV_ACCOUNT_SECONDARY_NAME = 'devstoreaccount1-secondary' -DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==' - -# Socket timeout in seconds -DEFAULT_SOCKET_TIMEOUT = 20 - -# Encryption constants -_ENCRYPTION_PROTOCOL_V1 = '1.0' diff --git a/azure/multiapi/storage/v2017_07_29/common/_deserialization.py b/azure/multiapi/storage/v2017_07_29/common/_deserialization.py deleted file mode 100644 index 622197d..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_deserialization.py +++ /dev/null @@ -1,360 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from dateutil import parser - -from ._common_conversion import _to_str - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from .models import ( - ServiceProperties, - Logging, - Metrics, - CorsRule, - AccessPolicy, - _dict, - GeoReplication, - ServiceStats, - DeleteRetentionPolicy, -) - - -def _to_int(value): - return value if value is None else int(value) - - -def _bool(value): - return value.lower() == 'true' - - -def _to_upper_str(value): - return _to_str(value).upper() if value is not None else None - - -def _get_download_size(start_range, end_range, resource_size): - if start_range is not None: - end_range = end_range if end_range else (resource_size if resource_size else None) - if end_range is not None: - return end_range - start_range - else: - return None - else: - return resource_size - - -GET_PROPERTIES_ATTRIBUTE_MAP = { - 'last-modified': (None, 'last_modified', parser.parse), - 'etag': (None, 'etag', _to_str), - 'x-ms-blob-type': (None, 'blob_type', _to_str), - 'content-length': (None, 'content_length', _to_int), - 'content-range': (None, 'content_range', _to_str), - 'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _to_int), - 'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _to_int), - 'x-ms-access-tier': (None, 'blob_tier', _to_str), - 'x-ms-access-tier-change-time': (None, 'blob_tier_change_time', parser.parse), - 'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool), - 'x-ms-archive-status': (None, 'rehydration_status', _to_str), - 'x-ms-share-quota': (None, 'quota', _to_int), - 'x-ms-server-encrypted': (None, 'server_encrypted', 
_bool), - 'content-type': ('content_settings', 'content_type', _to_str), - 'cache-control': ('content_settings', 'cache_control', _to_str), - 'content-encoding': ('content_settings', 'content_encoding', _to_str), - 'content-disposition': ('content_settings', 'content_disposition', _to_str), - 'content-language': ('content_settings', 'content_language', _to_str), - 'content-md5': ('content_settings', 'content_md5', _to_str), - 'x-ms-lease-status': ('lease', 'status', _to_str), - 'x-ms-lease-state': ('lease', 'state', _to_str), - 'x-ms-lease-duration': ('lease', 'duration', _to_str), - 'x-ms-copy-id': ('copy', 'id', _to_str), - 'x-ms-copy-source': ('copy', 'source', _to_str), - 'x-ms-copy-status': ('copy', 'status', _to_str), - 'x-ms-copy-progress': ('copy', 'progress', _to_str), - 'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse), - 'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str), - 'x-ms-copy-status-description': ('copy', 'status_description', _to_str), -} - - -def _parse_metadata(response): - ''' - Extracts out resource metadata information. - ''' - - if response is None or response.headers is None: - return None - - metadata = _dict() - for key, value in response.headers.items(): - if key.lower().startswith('x-ms-meta-'): - metadata[key[10:]] = _to_str(value) - - return metadata - - -def _parse_properties(response, result_class): - ''' - Extracts out resource properties and metadata information. - Ignores the standard http headers. - ''' - - if response is None or response.headers is None: - return None - - props = result_class() - for key, value in response.headers.items(): - info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key) - if info: - if info[0] is None: - setattr(props, info[1], info[2](value)) - else: - attr = getattr(props, info[0]) - setattr(attr, info[1], info[2](value)) - - if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None: - props.blob_tier = _to_upper_str(props.blob_tier) - return props - - -def _parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def _convert_xml_to_signed_identifiers(response): - ''' - - - - unique-value - - start-time - expiry-time - abbreviated-permission-list - - - - ''' - if response is None or response.body is None: - return None - - list_element = ETree.fromstring(response.body) - signed_identifiers = _dict() - - for signed_identifier_element in list_element.findall('SignedIdentifier'): - # Id element - id = signed_identifier_element.find('Id').text - - # Access policy element - access_policy = AccessPolicy() - access_policy_element = signed_identifier_element.find('AccessPolicy') - if access_policy_element is not None: - start_element = access_policy_element.find('Start') - if start_element is not None: - access_policy.start = parser.parse(start_element.text) - - expiry_element = access_policy_element.find('Expiry') - if expiry_element is not None: - access_policy.expiry = parser.parse(expiry_element.text) - - access_policy.permission = access_policy_element.findtext('Permission') - - signed_identifiers[id] = access_policy - - return signed_identifiers - - -def 
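The GET_PROPERTIES_ATTRIBUTE_MAP above drives _parse_properties: each response header maps to a (section, attribute, converter) triple that is applied onto the result object. A simplified, self-contained sketch with made-up header values and trimmed converters::

    from dateutil import parser

    ATTRIBUTE_MAP = {
        'last-modified': (None, 'last_modified', parser.parse),
        'content-length': (None, 'content_length', int),
        'content-type': ('content_settings', 'content_type', str),
    }

    class _Bag(object):
        """Stand-in for the properties/content-settings result classes."""

    headers = {
        'last-modified': 'Mon, 01 Jan 2018 00:00:00 GMT',
        'content-length': '1024',
        'content-type': 'text/csv',
    }

    props = _Bag()
    props.content_settings = _Bag()
    for key, value in headers.items():
        section, attr, convert = ATTRIBUTE_MAP[key]
        # Top-level attributes go on the object itself; sectioned ones on a child.
        target = props if section is None else getattr(props, section)
        setattr(target, attr, convert(value))

    # props.content_length == 1024
    # props.content_settings.content_type == 'text/csv'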
_convert_xml_to_service_stats(response): - ''' - - - - live|bootstrap|unavailable - sync-time| - - - ''' - if response is None or response.body is None: - return None - - service_stats_element = ETree.fromstring(response.body) - - geo_replication_element = service_stats_element.find('GeoReplication') - - geo_replication = GeoReplication() - geo_replication.status = geo_replication_element.find('Status').text - last_sync_time = geo_replication_element.find('LastSyncTime').text - geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None - - service_stats = ServiceStats() - service_stats.geo_replication = geo_replication - return service_stats - - -def _convert_xml_to_service_properties(response): - ''' - - - - version-number - true|false - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - - comma-separated-list-of-allowed-origins - comma-separated-list-of-HTTP-verb - max-caching-age-in-seconds - comma-seperated-list-of-response-headers - comma-seperated-list-of-request-headers - - - - true|false - number-of-days - - - ''' - if response is None or response.body is None: - return None - - service_properties_element = ETree.fromstring(response.body) - service_properties = ServiceProperties() - - # Logging - logging = service_properties_element.find('Logging') - if logging is not None: - service_properties.logging = Logging() - service_properties.logging.version = logging.find('Version').text - service_properties.logging.delete = _bool(logging.find('Delete').text) - service_properties.logging.read = _bool(logging.find('Read').text) - service_properties.logging.write = _bool(logging.find('Write').text) - - _convert_xml_to_retention_policy(logging.find('RetentionPolicy'), - service_properties.logging.retention_policy) - # HourMetrics - hour_metrics_element = service_properties_element.find('HourMetrics') - if hour_metrics_element is not None: - service_properties.hour_metrics = Metrics() - _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics) - - # MinuteMetrics - minute_metrics_element = service_properties_element.find('MinuteMetrics') - if minute_metrics_element is not None: - service_properties.minute_metrics = Metrics() - _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics) - - # CORS - cors = service_properties_element.find('Cors') - if cors is not None: - service_properties.cors = list() - for rule in cors.findall('CorsRule'): - allowed_origins = rule.find('AllowedOrigins').text.split(',') - - allowed_methods = rule.find('AllowedMethods').text.split(',') - - max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text) - - cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds) - - exposed_headers = rule.find('ExposedHeaders').text - if exposed_headers is not None: - cors_rule.exposed_headers = exposed_headers.split(',') - - allowed_headers = rule.find('AllowedHeaders').text - if allowed_headers is not None: - cors_rule.allowed_headers = allowed_headers.split(',') - - service_properties.cors.append(cors_rule) - - # Target version - target_version = service_properties_element.find('DefaultServiceVersion') - if target_version is not None: - service_properties.target_version = target_version.text - - # DeleteRetentionPolicy - delete_retention_policy_element = service_properties_element.find('DeleteRetentionPolicy') - if 
delete_retention_policy_element is not None: - service_properties.delete_retention_policy = DeleteRetentionPolicy() - policy_enabled = _bool(delete_retention_policy_element.find('Enabled').text) - service_properties.delete_retention_policy.enabled = policy_enabled - - if policy_enabled: - service_properties.delete_retention_policy.days = int(delete_retention_policy_element.find('Days').text) - - return service_properties - - -def _convert_xml_to_metrics(xml, metrics): - ''' - version-number - true|false - true|false - - true|false - number-of-days - - ''' - # Version - metrics.version = xml.find('Version').text - - # Enabled - metrics.enabled = _bool(xml.find('Enabled').text) - - # IncludeAPIs - include_apis_element = xml.find('IncludeAPIs') - if include_apis_element is not None: - metrics.include_apis = _bool(include_apis_element.text) - - # RetentionPolicy - _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy) - - -def _convert_xml_to_retention_policy(xml, retention_policy): - ''' - true|false - number-of-days - ''' - # Enabled - retention_policy.enabled = _bool(xml.find('Enabled').text) - - # Days - days_element = xml.find('Days') - if days_element is not None: - retention_policy.days = int(days_element.text) diff --git a/azure/multiapi/storage/v2017_07_29/common/_encryption.py b/azure/multiapi/storage/v2017_07_29/common/_encryption.py deleted file mode 100644 index cd7d92e..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_encryption.py +++ /dev/null @@ -1,233 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC - -from ._common_conversion import ( - _encode_base64, - _decode_base64_to_bytes, -) -from ._constants import ( - _ENCRYPTION_PROTOCOL_V1, - __version__, -) -from ._error import ( - _ERROR_UNSUPPORTED_ENCRYPTION_VERSION, - _validate_not_none, - _validate_encryption_protocol_version, - _validate_key_encryption_key_unwrap, - _validate_kek_id, -) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. 
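The service-properties deserialization above is plain ElementTree traversal. A sketch of the same pattern applied to a contrived StorageServiceProperties payload, covering the DefaultServiceVersion and DeleteRetentionPolicy branches::

    from xml.etree import ElementTree as ETree

    xml = (
        '<StorageServiceProperties>'
        '<DefaultServiceVersion>2017-07-29</DefaultServiceVersion>'
        '<DeleteRetentionPolicy>'
        '<Enabled>true</Enabled>'
        '<Days>7</Days>'
        '</DeleteRetentionPolicy>'
        '</StorageServiceProperties>'
    )

    root = ETree.fromstring(xml)

    target_version = root.find('DefaultServiceVersion').text       # '2017-07-29'

    policy = root.find('DeleteRetentionPolicy')
    enabled = policy.find('Enabled').text.lower() == 'true'        # True
    days = int(policy.find('Days').text) if enabled else None      # 7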
- ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - except KeyError: - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol) - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_unwrap(key_encryption_key) - _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid()) - - # Will throw an exception if the specified algorithm is not supported. 
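_generate_AES_CBC_cipher above builds an AES-256-CBC cipher from the `cryptography` package. A throwaway round-trip sketch using the same primitives; the key, IV, and plaintext are random/demo values, and the naive zero padding here stands in for the real padding scheme::

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher
    from cryptography.hazmat.primitives.ciphers.algorithms import AES
    from cryptography.hazmat.primitives.ciphers.modes import CBC

    cek = os.urandom(32)   # 256-bit content encryption key
    iv = os.urandom(16)    # AES block-sized initialization vector

    cipher = Cipher(AES(cek), CBC(iv), default_backend())

    plaintext = b'hello blob'
    padded = plaintext + b'\x00' * (16 - len(plaintext) % 16)  # demo-only padding

    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(padded) + encryptor.finalize()

    decryptor = cipher.decryptor()
    assert decryptor.update(ciphertext) + decryptor.finalize() == padded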
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key diff --git a/azure/multiapi/storage/v2017_07_29/common/_error.py b/azure/multiapi/storage/v2017_07_29/common/_error.py deleted file mode 100644 index a8f6786..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_error.py +++ /dev/null @@ -1,179 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from sys import version_info - -if version_info < (3,): - def _str(value): - if isinstance(value, unicode): - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_str(value): - return _str(value) if value is not None else None - - -from azure.common import ( - AzureHttpError, - AzureConflictHttpError, - AzureMissingResourceHttpError, - AzureException, -) -from ._constants import ( - _ENCRYPTION_PROTOCOL_V1, -) - -_ERROR_CONFLICT = 'Conflict ({0})' -_ERROR_NOT_FOUND = 'Not found ({0})' -_ERROR_UNKNOWN = 'Unknown error ({0})' -_ERROR_STORAGE_MISSING_INFO = \ - 'You need to provide an account name and either an account_key or sas_token when creating a storage service.' -_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \ - 'The emulator does not support the file service.' -_ERROR_ACCESS_POLICY = \ - 'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \ - 'instance' -_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.' -_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.' -_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.' -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' -_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.' -_ERROR_VALUE_NONE = '{0} should not be None.' -_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.' -_ERROR_VALUE_NEGATIVE = '{0} should not be negative.' -_ERROR_NO_SINGLE_THREAD_CHUNKING = \ - 'To use {0} chunk downloader more than 1 thread must be ' + \ - 'used since get_{0}_to_bytes should be called for single threaded ' + \ - '{0} downloads.' -_ERROR_START_END_NEEDED_FOR_MD5 = \ - 'Both end_range and start_range need to be specified ' + \ - 'for getting content MD5.' -_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \ - 'Getting content MD5 for a range greater than 4MB ' + \ - 'is not supported.' -_ERROR_MD5_MISMATCH = \ - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.' -_ERROR_TOO_MANY_ACCESS_POLICIES = \ - 'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' -_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \ - 'Encryption version is not supported.' -_ERROR_DECRYPTION_FAILURE = \ - 'Decryption failed' -_ERROR_ENCRYPTION_REQUIRED = \ - 'Encryption required but no key was provided.' -_ERROR_DECRYPTION_REQUIRED = \ - 'Decryption required but neither key nor resolver was provided.' 
+ \ - ' If you do not want to decypt, please do not set the require encryption flag.' -_ERROR_INVALID_KID = \ - 'Provided or resolved key-encryption-key does not match the id of key used to encrypt.' -_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \ - 'Specified encryption algorithm is not supported.' -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \ - ' for this method.' -_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.' -_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.' - - -def _dont_fail_on_exist(error): - ''' don't throw exception if the resource exists. - This is called by create_* APIs with fail_on_exist=False''' - if isinstance(error, AzureConflictHttpError): - return False - else: - raise error - - -def _dont_fail_not_exist(error): - ''' don't throw exception if the resource doesn't exist. - This is called by create_* APIs with fail_on_exist=False''' - if isinstance(error, AzureMissingResourceHttpError): - return False - else: - raise error - - -def _http_error_handler(http_error): - ''' Simple error handler for azure.''' - message = str(http_error) - if 'x-ms-error-code' in http_error.respheader: - message += 'ErrorCode: ' + http_error.respheader['x-ms-error-code'] - if http_error.respbody is not None: - message += '\n' + http_error.respbody.decode('utf-8-sig') - raise AzureHttpError(message, http_error.status) - - -def _validate_type_bytes(param_name, param): - if not isinstance(param, bytes): - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) - - -def _validate_type_bytes_or_stream(param_name, param): - if not (isinstance(param, bytes) or hasattr(param, 'read')): - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name)) - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError(_ERROR_VALUE_NONE.format(param_name)) - - -def _validate_content_match(server_md5, computed_md5): - if server_md5 != computed_md5: - raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5)) - - -def _validate_access_policies(identifiers): - if identifiers and len(identifiers) > 5: - raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. 
- if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -def _validate_key_encryption_key_unwrap(kek): - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - - -def _validate_encryption_required(require_encryption, kek): - if require_encryption and (kek is None): - raise ValueError(_ERROR_ENCRYPTION_REQUIRED) - - -def _validate_decryption_required(require_encryption, kek, resolver): - if (require_encryption and (kek is None) and - (resolver is None)): - raise ValueError(_ERROR_DECRYPTION_REQUIRED) - - -def _validate_encryption_protocol_version(encryption_protocol): - if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - - -def _validate_kek_id(kid, resolved_id): - if not (kid == resolved_id): - raise ValueError(_ERROR_INVALID_KID) - - -def _validate_encryption_unsupported(require_encryption, key_encryption_key): - if require_encryption or (key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) diff --git a/azure/multiapi/storage/v2017_07_29/common/_http/__init__.py b/azure/multiapi/storage/v2017_07_29/common/_http/__init__.py deleted file mode 100644 index 2990ec8..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_http/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - - -class HTTPError(Exception): - ''' - Represents an HTTP Exception when response status code >= 300. - - :ivar int status: - the status code of the response - :ivar str message: - the message - :ivar list headers: - the returned headers, as a list of (name, value) pairs - :ivar bytes body: - the body of the response - ''' - - def __init__(self, status, message, respheader, respbody): - self.status = status - self.respheader = respheader - self.respbody = respbody - Exception.__init__(self, message) - - -class HTTPResponse(object): - ''' - Represents a response from an HTTP request. - - :ivar int status: - the status code of the response - :ivar str message: - the message - :ivar dict headers: - the returned headers - :ivar bytes body: - the body of the response - ''' - - def __init__(self, status, message, headers, body): - self.status = status - self.message = message - self.headers = headers - self.body = body - - -class HTTPRequest(object): - ''' - Represents an HTTP Request. - - :ivar str host: - the host name to connect to - :ivar str method: - the method to use to connect (string such as GET, POST, PUT, etc.) 
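The validators above only duck-type the key-encryption-key: any object exposing wrap_key, unwrap_key, get_kid, and get_key_wrap_algorithm will do. A toy KEK satisfying that interface, for illustration only (a real implementation would wrap the CEK with RSA-OAEP or AES key wrap, e.g. via Key Vault)::

    class ToyKeyEncryptionKey(object):
        """Minimal object matching the wrap/unwrap interface checked above."""

        def __init__(self, kid):
            self._kid = kid

        def wrap_key(self, cek):
            # Obviously insecure; stands in for a real key-wrapping call.
            return bytes(b ^ 0x42 for b in cek)

        def unwrap_key(self, wrapped_cek, algorithm):
            return bytes(b ^ 0x42 for b in wrapped_cek)

        def get_kid(self):
            return self._kid

        def get_key_wrap_algorithm(self):
            return 'toy-xor'

    kek = ToyKeyEncryptionKey('local:toy-key-1')
    wrapped = kek.wrap_key(b'\x00' * 32)
    assert kek.unwrap_key(wrapped, 'toy-xor') == b'\x00' * 32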
- :ivar str path: - the uri fragment - :ivar dict query: - query parameters - :ivar dict headers: - header values - :ivar bytes body: - the body of the request. - ''' - - def __init__(self): - self.host = '' - self.method = '' - self.path = '' - self.query = {} # list of (name, value) - self.headers = {} # list of (header name, header value) - self.body = '' diff --git a/azure/multiapi/storage/v2017_07_29/common/_http/httpclient.py b/azure/multiapi/storage/v2017_07_29/common/_http/httpclient.py deleted file mode 100644 index b584766..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_http/httpclient.py +++ /dev/null @@ -1,107 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -from . import HTTPResponse -from .._serialization import _get_data_bytes_or_stream_only -logger = logging.getLogger(__name__) - - -class _HTTPClient(object): - ''' - Takes the request and sends it to cloud service and returns the response. - ''' - - def __init__(self, protocol=None, session=None, timeout=None): - ''' - :param str protocol: - http or https. - :param requests.Session session: - session object created with requests library (or compatible). - :param int timeout: - timeout for the http request, in seconds. - ''' - self.protocol = protocol - self.session = session - self.timeout = timeout - - # By default, requests adds an Accept:*/* and Accept-Encoding to the session, - # which causes issues with some Azure REST APIs. Removing these here gives us - # the flexibility to add it back on a case by case basis. - if 'Accept' in self.session.headers: - del self.session.headers['Accept'] - - if 'Accept-Encoding' in self.session.headers: - del self.session.headers['Accept-Encoding'] - - self.proxies = None - - def set_proxy(self, host, port, user, password): - ''' - Sets the proxy server host and port for the HTTP CONNECT Tunnelling. - - Note that we set the proxies directly on the request later on rather than - using the session object as requests has a bug where session proxy is ignored - in favor of environment proxy. So, auth will not work unless it is passed - directly when making the request as this overrides both. - - :param str host: - Address of the proxy. Ex: '192.168.0.100' - :param int port: - Port of the proxy. Ex: 6000 - :param str user: - User for proxy authorization. - :param str password: - Password for proxy authorization. - ''' - if user and password: - proxy_string = '{}:{}@{}:{}'.format(user, password, host, port) - else: - proxy_string = '{}:{}'.format(host, port) - - self.proxies = {'http': 'http://{}'.format(proxy_string), - 'https': 'https://{}'.format(proxy_string)} - - def perform_request(self, request): - ''' - Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. If - the response code indicates an error, raise an HTTPError. - - :param HTTPRequest request: - The request to serialize and send. - :return: An HTTPResponse containing the parsed HTTP response. 
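set_proxy above assembles a proxies mapping in the shape `requests` expects and, as the inline comment explains, passes it per request rather than on the session so that proxy auth is not overridden by environment proxies. A small sketch with placeholder proxy settings::

    host, port, user, password = '192.168.0.100', 6000, 'proxyuser', 'proxypass'

    if user and password:
        proxy_string = '{}:{}@{}:{}'.format(user, password, host, port)
    else:
        proxy_string = '{}:{}'.format(host, port)

    proxies = {
        'http': 'http://{}'.format(proxy_string),
        'https': 'https://{}'.format(proxy_string),
    }

    # Later handed to requests on every call, e.g.:
    # session.request(method, uri, headers=headers, data=body, proxies=proxies)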
- :rtype: :class:`~azure.storage.common._http.HTTPResponse` - ''' - # Verify the body is in bytes or either a file-like/stream object - if request.body: - request.body = _get_data_bytes_or_stream_only('request.body', request.body) - - # Construct the URI - uri = self.protocol.lower() + '://' + request.host + request.path - - # Send the request - response = self.session.request(request.method, - uri, - params=request.query, - headers=request.headers, - data=request.body or None, - timeout=self.timeout, - proxies=self.proxies) - - # Parse the response - status = int(response.status_code) - response_headers = {} - for key, name in response.headers.items(): - # Preserve the case of metadata - if key.lower().startswith('x-ms-meta-'): - response_headers[key] = name - else: - response_headers[key.lower()] = name - - wrap = HTTPResponse(status, response.reason, response_headers, response.content) - response.close() - - return wrap diff --git a/azure/multiapi/storage/v2017_07_29/common/_serialization.py b/azure/multiapi/storage/v2017_07_29/common/_serialization.py deleted file mode 100644 index fd34d58..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/_serialization.py +++ /dev/null @@ -1,352 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys -import uuid -from datetime import date -from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation) -from os import fstat -from time import time -from wsgiref.handlers import format_date_time - -from dateutil.tz import tzutc - -if sys.version_info >= (3,): - from urllib.parse import quote as url_quote -else: - from urllib2 import quote as url_quote - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from ._error import ( - _ERROR_VALUE_SHOULD_BE_BYTES, - _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, - _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM -) -from .models import ( - _unicode_type, -) -from ._common_conversion import ( - _str, -) - - -def _to_utc_datetime(value): - # Azure expects the date value passed in to be UTC. - # Azure will always return values as UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - if value.tzinfo: - value = value.astimezone(tzutc()) - return value.strftime('%Y-%m-%dT%H:%M:%SZ') - - -def _update_request(request, x_ms_version, user_agent_string): - # Verify body - if request.body: - request.body = _get_data_bytes_or_stream_only('request.body', request.body) - length = _len_plus(request.body) - - # only scenario where this case is plausible is if the stream object is not seekable. - if length is None: - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM) - - # if it is PUT, POST, MERGE, DELETE, need to add content-length to header. 
- if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: - request.headers['Content-Length'] = str(length) - - # append addtional headers based on the service - request.headers['x-ms-version'] = x_ms_version - request.headers['User-Agent'] = user_agent_string - request.headers['x-ms-client-request-id'] = str(uuid.uuid1()) - - # If the host has a path component (ex local storage), move it - path = request.host.split('/', 1) - if len(path) == 2: - request.host = path[0] - request.path = '/{}{}'.format(path[1], request.path) - - # Encode and optionally add local storage prefix to path - request.path = url_quote(request.path, '/()$=\',~') - - -def _add_metadata_headers(metadata, request): - if metadata: - if not request.headers: - request.headers = {} - for name, value in metadata.items(): - request.headers['x-ms-meta-' + name] = value - - -def _add_date_header(request): - current_time = format_date_time(time()) - request.headers['x-ms-date'] = current_time - - -def _get_data_bytes_only(param_name, param_value): - '''Validates the request body passed in and converts it to bytes - if our policy allows it.''' - if param_value is None: - return b'' - - if isinstance(param_value, bytes): - return param_value - - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) - - -def _get_data_bytes_or_stream_only(param_name, param_value): - '''Validates the request body passed in is a stream/file-like or bytes - object.''' - if param_value is None: - return b'' - - if isinstance(param_value, bytes) or hasattr(param_value, 'read'): - return param_value - - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name)) - - -def _get_request_body(request_body): - '''Converts an object into a request body. If it's None - we'll return an empty string, if it's one of our objects it'll - convert it to XML and return it. 
Otherwise we just use the object - directly''' - if request_body is None: - return b'' - - if isinstance(request_body, bytes) or isinstance(request_body, IOBase): - return request_body - - if isinstance(request_body, _unicode_type): - return request_body.encode('utf-8') - - request_body = str(request_body) - if isinstance(request_body, _unicode_type): - return request_body.encode('utf-8') - - return request_body - - -def _convert_signed_identifiers_to_xml(signed_identifiers): - if signed_identifiers is None: - return '' - - sis = ETree.Element('SignedIdentifiers') - for id, access_policy in signed_identifiers.items(): - # Root signed identifers element - si = ETree.SubElement(sis, 'SignedIdentifier') - - # Id element - ETree.SubElement(si, 'Id').text = id - - # Access policy element - policy = ETree.SubElement(si, 'AccessPolicy') - - if access_policy.start: - start = access_policy.start - if isinstance(access_policy.start, date): - start = _to_utc_datetime(start) - ETree.SubElement(policy, 'Start').text = start - - if access_policy.expiry: - expiry = access_policy.expiry - if isinstance(access_policy.expiry, date): - expiry = _to_utc_datetime(expiry) - ETree.SubElement(policy, 'Expiry').text = expiry - - if access_policy.permission: - ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission) - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - return output - - -def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, - cors, target_version=None, delete_retention_policy=None): - ''' - - - - version-number - true|false - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - - comma-separated-list-of-allowed-origins - comma-separated-list-of-HTTP-verb - max-caching-age-in-seconds - comma-seperated-list-of-response-headers - comma-seperated-list-of-request-headers - - - - true|false - number-of-days - - - ''' - service_properties_element = ETree.Element('StorageServiceProperties') - - # Logging - if logging: - logging_element = ETree.SubElement(service_properties_element, 'Logging') - ETree.SubElement(logging_element, 'Version').text = logging.version - ETree.SubElement(logging_element, 'Delete').text = str(logging.delete) - ETree.SubElement(logging_element, 'Read').text = str(logging.read) - ETree.SubElement(logging_element, 'Write').text = str(logging.write) - - retention_element = ETree.SubElement(logging_element, 'RetentionPolicy') - _convert_retention_policy_to_xml(logging.retention_policy, retention_element) - - # HourMetrics - if hour_metrics: - hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics') - _convert_metrics_to_xml(hour_metrics, hour_metrics_element) - - # MinuteMetrics - if minute_metrics: - minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics') - _convert_metrics_to_xml(minute_metrics, minute_metrics_element) - - # CORS - # Make sure to still serialize empty list - if cors is not None: - cors_element = ETree.SubElement(service_properties_element, 'Cors') - for rule in cors: - cors_rule = ETree.SubElement(cors_element, 'CorsRule') - ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins) - 
ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods) - ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds) - ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers) - ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers) - - # Target version - if target_version: - ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version - - # DeleteRetentionPolicy - if delete_retention_policy: - policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy') - ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled) - - if delete_retention_policy.enabled: - ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days) - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8', - method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - return output - - -def _convert_metrics_to_xml(metrics, root): - ''' - version-number - true|false - true|false - - true|false - number-of-days - - ''' - # Version - ETree.SubElement(root, 'Version').text = metrics.version - - # Enabled - ETree.SubElement(root, 'Enabled').text = str(metrics.enabled) - - # IncludeAPIs - if metrics.enabled and metrics.include_apis is not None: - ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis) - - # RetentionPolicy - retention_element = ETree.SubElement(root, 'RetentionPolicy') - _convert_retention_policy_to_xml(metrics.retention_policy, retention_element) - - -def _convert_retention_policy_to_xml(retention_policy, root): - ''' - true|false - number-of-days - ''' - # Enabled - ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled) - - # Days - if retention_policy.enabled and retention_policy.days: - ETree.SubElement(root, 'Days').text = str(retention_policy.days) - - -def _len_plus(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - return fstat(fileno).st_size - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length diff --git a/azure/multiapi/storage/v2017_07_29/common/cloudstorageaccount.py b/azure/multiapi/storage/v2017_07_29/common/cloudstorageaccount.py deleted file mode 100644 index 4bb03b5..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/cloudstorageaccount.py +++ /dev/null @@ -1,188 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -# Note that we import BlobService/QueueService/FileService on demand -# because this module is imported by azure/storage/__init__ -# ie. 
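_len_plus above probes a request body's size in three stages: len(), then the file descriptor, then seek/tell for seekable streams. A compact standalone sketch of the same idea::

    import io
    import os
    from io import SEEK_END, SEEK_SET

    def length_of(data):
        # 1. Anything with __len__ (bytes, bytearray, ...).
        try:
            return len(data)
        except TypeError:
            pass

        # 2. Real files: size via the file descriptor.
        try:
            return os.fstat(data.fileno()).st_size
        except (AttributeError, OSError, io.UnsupportedOperation):
            pass

        # 3. Seekable streams: measure from the current position to the end.
        try:
            pos = data.tell()
            data.seek(0, SEEK_END)
            end = data.tell()
            data.seek(pos, SEEK_SET)
            return end - pos
        except (AttributeError, io.UnsupportedOperation):
            return None  # non-seekable stream; the caller must reject it

    assert length_of(b'abc') == 3
    assert length_of(io.BytesIO(b'abcdef')) == 6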
we don't want 'import azure.storage' to trigger an automatic import -# of blob/queue/file packages. - -from ._error import _validate_not_none -from .models import ( - ResourceTypes, - Services, - AccountPermissions, -) -from .sharedaccesssignature import ( - SharedAccessSignature, -) - - -class CloudStorageAccount(object): - """ - Provides a factory for creating the blob, queue, and file services - with a common account name and account key or sas token. Users can either - use the factory or can construct the appropriate service directly. - """ - - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless is_emulated is used. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters. - ''' - self.account_name = account_name - self.account_key = account_key - self.sas_token = sas_token - self.is_emulated = is_emulated - - def create_block_blob_service(self): - ''' - Creates a BlockBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService` - ''' - try: - from .blob.blockblobservice import BlockBlobService - return BlockBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - except ImportError: - raise Exception('The package azure-storage-blob is required. ' - + 'Please install it using "pip install azure-storage-blob"') - - def create_page_blob_service(self): - ''' - Creates a PageBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService` - ''' - try: - from .blob.pageblobservice import PageBlobService - return PageBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - except ImportError: - raise Exception('The package azure-storage-blob is required. ' - + 'Please install it using "pip install azure-storage-blob"') - - def create_append_blob_service(self): - ''' - Creates a AppendBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService` - ''' - try: - from .blob.appendblobservice import AppendBlobService - return AppendBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - except ImportError: - raise Exception('The package azure-storage-blob is required. ' - + 'Please install it using "pip install azure-storage-blob"') - - def create_queue_service(self): - ''' - Creates a QueueService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. 
- :rtype: :class:`~azure.storage.queue.queueservice.QueueService` - ''' - try: - from .queue.queueservice import QueueService - return QueueService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - except ImportError: - raise Exception('The package azure-storage-queue is required. ' - + 'Please install it using "pip install azure-storage-queue"') - - def create_file_service(self): - ''' - Creates a FileService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~azure.storage.file.fileservice.FileService` - ''' - try: - from .file.fileservice import FileService - return FileService(self.account_name, self.account_key, - sas_token=self.sas_token) - except ImportError: - raise Exception('The package azure-storage-file is required. ' - + 'Please install it using "pip install azure-storage-file"') - - def generate_shared_access_signature(self, services, resource_types, - permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param Services services: - Specifies the services accessible with the account SAS. You can - combine values to provide access to more than one service. - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. 
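As a rough usage sketch of this factory and its account-SAS helper (not part of the original sources; the account name and key are placeholders, and the import paths simply mirror the file paths in this diff):

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_07_29.common.cloudstorageaccount import CloudStorageAccount
    from azure.multiapi.storage.v2017_07_29.common.models import (
        Services, ResourceTypes, AccountPermissions,
    )

    account = CloudStorageAccount('mystorageaccount', '<account-key>')

    # Each create_* method imports its service sub-package on demand.
    blob_service = account.create_block_blob_service()
    queue_service = account.create_queue_service()

    # Account-level SAS: read access to object-level blob APIs for one hour.
    sas = account.generate_shared_access_signature(
        services=Services.BLOB,
        resource_types=ResourceTypes.OBJECT,
        permission=AccountPermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # The token can then seed a key-less account object for downstream callers.
    limited_account = CloudStorageAccount('mystorageaccount', sas_token=sas)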
- ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(services, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) diff --git a/azure/multiapi/storage/v2017_07_29/common/models.py b/azure/multiapi/storage/v2017_07_29/common/models.py deleted file mode 100644 index e11153c..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/models.py +++ /dev/null @@ -1,646 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys - -if sys.version_info < (3,): - from collections import Iterable - - _unicode_type = unicode -else: - from collections.abc import Iterable - - _unicode_type = str - -from ._error import ( - _validate_not_none -) - - -class _HeaderDict(dict): - def __getitem__(self, index): - return super(_HeaderDict, self).__getitem__(index.lower()) - - -class _list(list): - '''Used so that additional properties can be set on the return list''' - pass - - -class _dict(dict): - '''Used so that additional properties can be set on the return dictionary''' - pass - - -class _OperationContext(object): - ''' - Contains information that lasts the lifetime of an operation. This operation - may span multiple calls to the Azure service. - - :ivar bool location_lock: - Whether the location should be locked for this operation. - :ivar str location: - The location to lock to. - ''' - - def __init__(self, location_lock=False): - self.location_lock = location_lock - self.host_location = None - - -class ListGenerator(Iterable): - ''' - A generator object used to list storage resources. The generator will lazily - follow the continuation tokens returned by the service and stop when all - resources have been returned or max_results is reached. - - If max_results is specified and the account has more than that number of - resources, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. 
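The paging behaviour described above can be consumed without touching continuation tokens directly; a hedged sketch in which service, list_resources and handle are placeholders standing in for any list operation that returns this generator:

    # Iteration lazily follows the service's continuation tokens until the
    # listing is exhausted or the requested max_results has been yielded.
    resources = service.list_resources(max_results=100)
    for item in resources:
        handle(item)

    # If the cap cut the listing short, next_marker can seed a fresh call
    # that resumes where this generator stopped.
    if resources.next_marker:
        remaining = service.list_resources(marker=resources.next_marker)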
- ''' - - def __init__(self, resources, list_method, list_args, list_kwargs): - self.items = resources - self.next_marker = resources.next_marker - - self._list_method = list_method - self._list_args = list_args - self._list_kwargs = list_kwargs - - def __iter__(self): - # return results - for i in self.items: - yield i - - while True: - # if no more results on the service, return - if not self.next_marker: - break - - # update the marker args - self._list_kwargs['marker'] = self.next_marker - - # handle max results, if present - max_results = self._list_kwargs.get('max_results') - if max_results is not None: - max_results = max_results - len(self.items) - - # if we've reached max_results, return - # else, update the max_results arg - if max_results <= 0: - break - else: - self._list_kwargs['max_results'] = max_results - - # get the next segment - resources = self._list_method(*self._list_args, **self._list_kwargs) - self.items = resources - self.next_marker = resources.next_marker - - # return results - for i in self.items: - yield i - - -class RetryContext(object): - ''' - Contains the request and response information that can be used to determine - whether and how to retry. This context is stored across retries and may be - used to store other information relevant to the retry strategy. - - :ivar ~azure.storage.common._http.HTTPRequest request: - The request sent to the storage service. - :ivar ~azure.storage.common._http.HTTPResponse response: - The response returned by the storage service. - :ivar LocationMode location_mode: - The location the request was sent to. - :ivar Exception exception: - The exception that just occurred. The type could either be AzureException (for HTTP errors), - or other Exception types from lower layers, which are kept unwrapped for easier processing. - :ivar bool is_emulated: - Whether retry is targeting the emulator. The default value is False. - ''' - - def __init__(self): - self.request = None - self.response = None - self.location_mode = None - self.exception = None - self.is_emulated = False - - -class LocationMode(object): - ''' - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - ''' - - PRIMARY = 'primary' - ''' Requests should be sent to the primary location. ''' - - SECONDARY = 'secondary' - ''' Requests should be sent to the secondary location, if possible. ''' - - -class RetentionPolicy(object): - ''' - By default, Storage Analytics will not delete any logging or metrics data. Blobs - will continue to be written until the shared 20TB limit is - reached. Once the 20TB limit is reached, Storage Analytics will stop writing - new data and will not resume until free space is available. This 20TB limit - is independent of the total limit for your storage account. - - There are two ways to delete Storage Analytics data: by manually making deletion - requests or by setting a data retention policy. Manual requests to delete Storage - Analytics data are billable, but delete requests resulting from a retention policy - are not billable. - ''' - - def __init__(self, enabled=False, days=None): - ''' - :param bool enabled: - Indicates whether a retention policy is enabled for the - storage service. If disabled, logging and metrics data will be retained - infinitely by the service unless explicitly deleted. - :param int days: - Required if enabled is true. 
Indicates the number of - days that metrics or logging data should be retained. All data older - than this value will be deleted. The minimum value you can specify is 1; - the largest value is 365 (one year). - ''' - _validate_not_none("enabled", enabled) - if enabled: - _validate_not_none("days", days) - - self.enabled = enabled - self.days = days - - -class Logging(object): - ''' - Storage Analytics logs detailed information about successful and failed requests - to a storage service. This information can be used to monitor individual requests - and to diagnose issues with a storage service. Requests are logged on a best-effort - basis. - - All logs are stored in block blobs in a container named $logs, which is - automatically created when Storage Analytics is enabled for a storage account. - The $logs container is located in the blob namespace of the storage account. - This container cannot be deleted once Storage Analytics has been enabled, though - its contents can be deleted. - - For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx - ''' - - def __init__(self, delete=False, read=False, write=False, - retention_policy=None): - ''' - :param bool delete: - Indicates whether all delete requests should be logged. - :param bool read: - Indicates whether all read requests should be logged. - :param bool write: - Indicates whether all write requests should be logged. - :param RetentionPolicy retention_policy: - The retention policy for the metrics. - ''' - _validate_not_none("read", read) - _validate_not_none("write", write) - _validate_not_none("delete", delete) - - self.version = u'1.0' - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy if retention_policy else RetentionPolicy() - - -class Metrics(object): - ''' - Metrics include aggregated transaction statistics and capacity data about requests - to a storage service. Transactions are reported at both the API operation level - as well as at the storage service level, and capacity is reported at the storage - service level. Metrics data can be used to analyze storage service usage, diagnose - issues with requests made against the storage service, and to improve the - performance of applications that use a service. - - For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx - ''' - - def __init__(self, enabled=False, include_apis=None, - retention_policy=None): - ''' - :param bool enabled: - Indicates whether metrics are enabled for - the service. - :param bool include_apis: - Required if enabled is True. Indicates whether metrics - should generate summary statistics for called API operations. - :param RetentionPolicy retention_policy: - The retention policy for the metrics. - ''' - _validate_not_none("enabled", enabled) - if enabled: - _validate_not_none("include_apis", include_apis) - - self.version = u'1.0' - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy if retention_policy else RetentionPolicy() - - -class CorsRule(object): - ''' - CORS is an HTTP feature that enables a web application running under one domain - to access resources in another domain. Web browsers implement a security - restriction known as same-origin policy that prevents a web page from calling - APIs in a different domain; CORS provides a secure way to allow one domain - (the origin domain) to call APIs in another domain. 
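A short sketch of how the analytics and CORS models in this module are typically put together before being serialized by _convert_service_properties_to_xml (shown earlier in this diff); the concrete values are illustrative only:

    from azure.multiapi.storage.v2017_07_29.common.models import (
        CorsRule, Logging, Metrics, RetentionPolicy,
    )

    # Keep analytics data for seven days and log every request type.
    retention = RetentionPolicy(enabled=True, days=7)
    logging_settings = Logging(delete=True, read=True, write=True,
                               retention_policy=retention)
    hour_metrics = Metrics(enabled=True, include_apis=True,
                           retention_policy=retention)

    # Allow GET and PUT from a single origin, with preflight responses
    # cached for one hour.
    cors = [CorsRule(allowed_origins=['https://contoso.example'],
                     allowed_methods=['GET', 'PUT'],
                     max_age_in_seconds=3600)]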
- - For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx - ''' - - def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0, - exposed_headers=None, allowed_headers=None): - ''' - :param allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :type allowed_origins: list(str) - :param allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :type allowed_methods: list(str) - :param int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - :param exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :type exposed_headers: list(str) - :param allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :type allowed_headers: list(str) - ''' - _validate_not_none("allowed_origins", allowed_origins) - _validate_not_none("allowed_methods", allowed_methods) - _validate_not_none("max_age_in_seconds", max_age_in_seconds) - - self.allowed_origins = allowed_origins if allowed_origins else list() - self.allowed_methods = allowed_methods if allowed_methods else list() - self.max_age_in_seconds = max_age_in_seconds - self.exposed_headers = exposed_headers if exposed_headers else list() - self.allowed_headers = allowed_headers if allowed_headers else list() - - -class DeleteRetentionPolicy(object): - ''' - To set DeleteRetentionPolicy, you must call Set Blob Service Properties using version 2017-07-29 or later. - This class groups the settings related to delete retention policy. - ''' - - def __init__(self, enabled=False, days=None): - ''' - :param bool enabled: - Required. Indicates whether a deleted blob or snapshot is retained or immediately removed by delete operation. - :param int days: - Required only if Enabled is true. Indicates the number of days that deleted blob be retained. - All data older than this value will be permanently deleted. - The minimum value you can specify is 1; the largest value is 365. - ''' - _validate_not_none("enabled", enabled) - if enabled: - _validate_not_none("days", days) - - self.enabled = enabled - self.days = days - - -class ServiceProperties(object): - ''' - Returned by get_*_service_properties functions. Contains the properties of a - storage service, including Analytics and CORS rules. - - Azure Storage Analytics performs logging and provides metrics data for a storage - account. You can use this data to trace requests, analyze usage trends, and - diagnose issues with your storage account. To use Storage Analytics, you must - enable it individually for each service you want to monitor. - - The aggregated data is stored in a well-known blob (for logging) and in well-known - tables (for metrics), which may be accessed using the Blob service and Table - service APIs. 
- - For an in-depth guide on using Storage Analytics and other tools to identify, - diagnose, and troubleshoot Azure Storage-related issues, see - http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/ - - For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx - ''' - - pass - - -class ServiceStats(object): - ''' - Returned by get_*_service_stats functions. Contains statistics related to - replication for the given service. It is only available when read-access - geo-redundant replication is enabled for the storage account. - - :ivar GeoReplication geo_replication: - An object containing statistics related to replication for the given service. - ''' - pass - - -class GeoReplication(object): - ''' - Contains statistics related to replication for the given service. - - :ivar str status: - The status of the secondary location. Possible values are: - live: Indicates that the secondary location is active and operational. - bootstrap: Indicates initial synchronization from the primary location - to the secondary location is in progress. This typically occurs - when replication is first enabled. - unavailable: Indicates that the secondary location is temporarily - unavailable. - :ivar date last_sync_time: - A GMT date value, to the second. All primary writes preceding this value - are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for - reads. The value may be empty if LastSyncTime is not available. This can - happen if the replication status is bootstrap or unavailable. Although - geo-replication is continuously enabled, the LastSyncTime result may - reflect a cached value from the service that is refreshed every few minutes. - ''' - pass - - -class AccessPolicy(object): - ''' - Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - ''' - - def __init__(self, permission=None, expiry=None, start=None): - ''' - :param str permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - ''' - self.start = start - self.expiry = expiry - self.permission = permission - - -class Protocol(object): - ''' - Specifies the protocol permitted for a SAS token. Note that HTTP only is - not allowed. - ''' - - HTTPS = 'https' - ''' Allow HTTPS requests only. ''' - - HTTPS_HTTP = 'https,http' - ''' Allow HTTP and HTTPS requests. ''' - - -class ResourceTypes(object): - ''' - Specifies the resource types that are accessible with the account SAS. - - :ivar ResourceTypes ResourceTypes.CONTAINER: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :ivar ResourceTypes ResourceTypes.OBJECT: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - :ivar ResourceTypes ResourceTypes.SERVICE: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - ''' - - def __init__(self, service=False, container=False, object=False, _str=None): - ''' - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - :param str _str: - A string representing the resource types. - ''' - if not _str: - _str = '' - self.service = service or ('s' in _str) - self.container = container or ('c' in _str) - self.object = object or ('o' in _str) - - def __or__(self, other): - return ResourceTypes(_str=str(self) + str(other)) - - def __add__(self, other): - return ResourceTypes(_str=str(self) + str(other)) - - def __str__(self): - return (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - -ResourceTypes.SERVICE = ResourceTypes(service=True) -ResourceTypes.CONTAINER = ResourceTypes(container=True) -ResourceTypes.OBJECT = ResourceTypes(object=True) - - -class Services(object): - ''' - Specifies the services accessible with the account SAS. - - :ivar Services Services.BLOB: The blob service. - :ivar Services Services.FILE: The file service - :ivar Services Services.QUEUE: The queue service. - :ivar Services Services.TABLE: The table service. 
- ''' - - def __init__(self, blob=False, queue=False, file=False, table=False, _str=None): - ''' - :param bool blob: - Access to any blob service, for example, the `.BlockBlobService` - :param bool queue: - Access to the `.QueueService` - :param bool file: - Access to the `.FileService` - :param bool table: - Access to the TableService - :param str _str: - A string representing the services. - ''' - if not _str: - _str = '' - self.blob = blob or ('b' in _str) - self.queue = queue or ('q' in _str) - self.file = file or ('f' in _str) - self.table = table or ('t' in _str) - - def __or__(self, other): - return Services(_str=str(self) + str(other)) - - def __add__(self, other): - return Services(_str=str(self) + str(other)) - - def __str__(self): - return (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('t' if self.table else '') + - ('f' if self.file else '')) - - -Services.BLOB = Services(blob=True) -Services.QUEUE = Services(queue=True) -Services.TABLE = Services(table=True) -Services.FILE = Services(file=True) - - -class AccountPermissions(object): - ''' - :class:`~ResourceTypes` class to be used with generate_shared_access_signature - method and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :ivar AccountPermissions AccountPermissions.ADD: - Valid for the following Object resource types only: queue messages and append blobs. - :ivar AccountPermissions AccountPermissions.CREATE: - Valid for the following Object resource types only: blobs and files. Users - can create new blobs or files, but may not overwrite existing blobs or files. - :ivar AccountPermissions AccountPermissions.DELETE: - Valid for Container and Object resource types, except for queue messages. - :ivar AccountPermissions AccountPermissions.LIST: - Valid for Service and Container resource types only. - :ivar AccountPermissions AccountPermissions.PROCESS: - Valid for the following Object resource type only: queue messages. - :ivar AccountPermissions AccountPermissions.READ: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :ivar AccountPermissions AccountPermissions.UPDATE: - Valid for the following Object resource types only: queue messages. - :ivar AccountPermissions AccountPermissions.WRITE: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - ''' - - def __init__(self, read=False, write=False, delete=False, list=False, - add=False, create=False, update=False, process=False, _str=None): - ''' - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. 
- Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :param str _str: - A string representing the permissions. - ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - self.add = add or ('a' in _str) - self.create = create or ('c' in _str) - self.update = update or ('u' in _str) - self.process = process or ('p' in _str) - - def __or__(self, other): - return AccountPermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return AccountPermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '')) - - -AccountPermissions.READ = AccountPermissions(read=True) -AccountPermissions.WRITE = AccountPermissions(write=True) -AccountPermissions.DELETE = AccountPermissions(delete=True) -AccountPermissions.LIST = AccountPermissions(list=True) -AccountPermissions.ADD = AccountPermissions(add=True) -AccountPermissions.CREATE = AccountPermissions(create=True) -AccountPermissions.UPDATE = AccountPermissions(update=True) -AccountPermissions.PROCESS = AccountPermissions(process=True) diff --git a/azure/multiapi/storage/v2017_07_29/common/retry.py b/azure/multiapi/storage/v2017_07_29/common/retry.py deleted file mode 100644 index 69dafef..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/retry.py +++ /dev/null @@ -1,292 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from abc import ABCMeta -from math import pow -import random - -from .models import LocationMode -from ._constants import ( - DEV_ACCOUNT_NAME, - DEV_ACCOUNT_SECONDARY_NAME -) - - -class _Retry(object): - ''' - The base class for Exponential and Linear retries containing shared code. - ''' - __metaclass__ = ABCMeta - - def __init__(self, max_attempts, retry_to_secondary): - ''' - Constructs a base retry object. - - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - ''' - self.max_attempts = max_attempts - self.retry_to_secondary = retry_to_secondary - - def _should_retry(self, context): - ''' - A function which determines whether or not to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - A boolean indicating whether or not to retry the request. - :rtype: bool - ''' - # If max attempts are reached, do not retry. 
- if context.count >= self.max_attempts: - return False - - status = None - if context.response and context.response.status: - status = context.response.status - - if status is None: - ''' - If status is None, retry as this request triggered an exception. For - example, network issues would trigger this. - ''' - return True - elif 200 <= status < 300: - ''' - This method is called after a successful response, meaning we failed - during the response body download or parsing. So, success codes should - be retried. - ''' - return True - elif 300 <= status < 500: - ''' - An exception occured, but in most cases it was expected. Examples could - include a 309 Conflict or 412 Precondition Failed. - ''' - if status == 404 and context.location_mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - elif status >= 500: - ''' - Response codes above 500 with the exception of 501 Not Implemented and - 505 Version Not Supported indicate a server issue and should be retried. - ''' - if status == 501 or status == 505: - return False - return True - else: - # If something else happened, it's unexpected. Retry. - return True - - def _set_next_host_location(self, context): - ''' - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. - ''' - if len(context.request.host_locations) > 1: - # If there's more than one possible location, retry to the alternative - if context.location_mode == LocationMode.PRIMARY: - context.location_mode = LocationMode.SECONDARY - - # if targeting the emulator (with path style), change path instead of host - if context.is_emulated: - # replace the first instance of primary account name with the secondary account name - context.request.path = context.request.path.replace(DEV_ACCOUNT_NAME, DEV_ACCOUNT_SECONDARY_NAME, 1) - else: - context.request.host = context.request.host_locations.get(context.location_mode) - else: - context.location_mode = LocationMode.PRIMARY - - # if targeting the emulator (with path style), change path instead of host - if context.is_emulated: - # replace the first instance of secondary account name with the primary account name - context.request.path = context.request.path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1) - else: - context.request.host = context.request.host_locations.get(context.location_mode) - - def _retry(self, context, backoff): - ''' - A function which determines whether and how to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :param function() backoff: - A function which returns the backoff time if a retry is to be performed. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - # If the context does not contain a count parameter, this request has not - # been retried yet. Add the count parameter to track the number of retries. - if not hasattr(context, 'count'): - context.count = 0 - - # Determine whether to retry, and if so increment the count, modify the - # request as desired, and return the backoff. 
- if self._should_retry(context): - backoff_interval = backoff(context) - context.count += 1 - - # If retry to secondary is enabled, attempt to change the host if the - # request allows it - if self.retry_to_secondary: - self._set_next_host_location(context) - - return backoff_interval - - return None - - -class ExponentialRetry(_Retry): - ''' - Exponential retry. - ''' - - def __init__(self, initial_backoff=15, increment_base=3, max_attempts=3, - retry_to_secondary=False, random_jitter_range=3): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary) - - ''' - A function which determines whether and how to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - - def retry(self, context): - return self._retry(context, self._backoff) - - ''' - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - - def _backoff(self, context): - random_generator = random.Random() - backoff = self.initial_backoff + (0 if context.count == 0 else pow(self.increment_base, context.count)) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(_Retry): - ''' - Linear retry. - ''' - - def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False, random_jitter_range=3): - ''' - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. 
- For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.backoff = backoff - self.max_attempts = max_attempts - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__(max_attempts, retry_to_secondary) - - ''' - A function which determines whether and how to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - - def retry(self, context): - return self._retry(context, self._backoff) - - ''' - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - - def _backoff(self, context): - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - self.random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0 - self.random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(self.random_range_start, self.random_range_end) - - -def no_retry(context): - ''' - Specifies never to retry. - - :param ~azure.storage.models.RetryContext context: - The retry context. - :return: - Always returns None to indicate never to retry. - :rtype: None - ''' - return None diff --git a/azure/multiapi/storage/v2017_07_29/common/sharedaccesssignature.py b/azure/multiapi/storage/v2017_07_29/common/sharedaccesssignature.py deleted file mode 100644 index c23201a..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/sharedaccesssignature.py +++ /dev/null @@ -1,217 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from datetime import date - -from ._common_conversion import ( - _sign_string, - _to_str, -) -from ._constants import DEFAULT_X_MS_VERSION -from ._serialization import ( - url_quote, - _to_utc_datetime, -) - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=DEFAULT_X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. 
- Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param Services services: - Specifies the services accessible with the account SAS. You can - combine values to provide access to more than one service. - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
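Roughly how this class is used on its own, combining the flag helpers from models.py (the account name and key are placeholders):

    from datetime import datetime, timedelta
    from azure.multiapi.storage.v2017_07_29.common.sharedaccesssignature import SharedAccessSignature
    from azure.multiapi.storage.v2017_07_29.common.models import (
        Services, ResourceTypes, AccountPermissions, Protocol,
    )

    sas = SharedAccessSignature('mystorageaccount', '<account-key>')
    token = sas.generate_account(
        services=Services.BLOB | Services.QUEUE,
        resource_types=ResourceTypes.CONTAINER | ResourceTypes.OBJECT,
        permission=AccountPermissions.READ | AccountPermissions.LIST,
        expiry=datetime.utcnow() + timedelta(hours=1),
        protocol=Protocol.HTTPS,
    )
    # The result is an unprefixed query string such as
    # 'ss=bq&srt=co&sp=rl&se=...&spr=https&sv=...&sig=...'.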
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _to_str(val) - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(_QueryStringConstants.SIGNED_START, start) - self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(_QueryStringConstants.SIGNED_IP, ip) - self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(_QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, id): - self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id) - - def add_account(self, services, resource_types): - self._add_query(_QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_resource_signature(self, account_name, account_key, service, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/' + service + '/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(_QueryStringConstants.SIGNED_START) + - get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(_QueryStringConstants.SIGNED_IP) + - get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) - - if service == 'blob' or service == 'file': - string_to_sign += \ - (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, - _sign_string(account_key, string_to_sign)) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(_QueryStringConstants.SIGNED_START) + - get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(_QueryStringConstants.SIGNED_IP) + - get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) - - self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, - _sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storage/v2017_07_29/common/storageclient.py b/azure/multiapi/storage/v2017_07_29/common/storageclient.py deleted file mode 100644 index f6aae91..0000000 --- a/azure/multiapi/storage/v2017_07_29/common/storageclient.py +++ /dev/null @@ -1,363 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -from abc import ABCMeta -import logging - -logger = logging.getLogger(__name__) -from time import sleep - -import requests -from azure.common import ( - AzureException, -) - -from ._constants import ( - DEFAULT_SOCKET_TIMEOUT, - DEFAULT_X_MS_VERSION, - DEFAULT_USER_AGENT_STRING, - USER_AGENT_STRING_PREFIX, - USER_AGENT_STRING_SUFFIX, -) -from ._error import ( - _ERROR_DECRYPTION_FAILURE, - _http_error_handler, -) -from ._http import HTTPError -from ._http.httpclient import _HTTPClient -from ._serialization import ( - _update_request, - _add_date_header, -) -from .models import ( - RetryContext, - LocationMode, - _OperationContext, -) -from .retry import ExponentialRetry - - -class StorageClient(object): - ''' - This is the base class for service objects. Service objects are used to do - all requests to Storage. This class cannot be instantiated directly. 
- - :ivar str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :ivar str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :ivar str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :ivar str primary_endpoint: - The endpoint to send storage requests to. - :ivar str secondary_endpoint: - The secondary endpoint to read storage data from. This will only be a - valid endpoint if the storage account used is RA-GRS and thus allows - reading from secondary. - :ivar function(context) retry: - A function which determines whether to retry. Takes as a parameter a - :class:`~azure.storage.common.models.RetryContext` object. Returns the number - of seconds to wait before retrying the request, or None to indicate not - to retry. - :ivar ~azure.storage.common.models.LocationMode location_mode: - The host location to use to make requests. Defaults to LocationMode.PRIMARY. - Note that this setting only applies to RA-GRS accounts as other account - types do not allow reading from secondary. If the location_mode is set to - LocationMode.SECONDARY, read requests will be sent to the secondary endpoint. - Write requests will continue to be sent to primary. - :ivar str protocol: - The protocol to use for requests. Defaults to https. - :ivar requests.Session request_session: - The session object to use for http requests. - :ivar function(request) request_callback: - A function called immediately before each request is sent. This function - takes as a parameter the request object and returns nothing. It may be - used to added custom headers or log request data. - :ivar function() response_callback: - A function called immediately after each response is received. This - function takes as a parameter the response object and returns nothing. - It may be used to log response data. - :ivar function() retry_callback: - A function called immediately after retry evaluation is performed. This - function takes as a parameter the retry context object and returns nothing. - It may be used to detect retries and log context information. - ''' - - __metaclass__ = ABCMeta - - def __init__(self, connection_params): - ''' - :param obj connection_params: The parameters to use to construct the client. 
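StorageClient itself is abstract, but the attributes documented above are what callers typically tune on the concrete services built on top of it; a hedged sketch, with blob_service standing in for any such service object:

    from azure.multiapi.storage.v2017_07_29.common.retry import LinearRetry, no_retry
    from azure.multiapi.storage.v2017_07_29.common.models import LocationMode

    # Swap the default exponential policy for a linear one, or disable retries.
    blob_service.retry = LinearRetry(backoff=5, max_attempts=5).retry
    # blob_service.retry = no_retry

    # Prefer the secondary endpoint for reads on an RA-GRS account.
    blob_service.location_mode = LocationMode.SECONDARY

    # Hooks fired immediately before each request and after each response.
    blob_service.request_callback = lambda request: print(request.method, request.path)
    blob_service.response_callback = lambda response: print(response.status)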
- ''' - self.account_name = connection_params.account_name - self.account_key = connection_params.account_key - self.sas_token = connection_params.sas_token - self.is_emulated = connection_params.is_emulated - - self.primary_endpoint = connection_params.primary_endpoint - self.secondary_endpoint = connection_params.secondary_endpoint - - protocol = connection_params.protocol - request_session = connection_params.request_session or requests.Session() - socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT - self._httpclient = _HTTPClient( - protocol=protocol, - session=request_session, - timeout=socket_timeout, - ) - - self.retry = ExponentialRetry().retry - self.location_mode = LocationMode.PRIMARY - - self.request_callback = None - self.response_callback = None - self.retry_callback = None - self._X_MS_VERSION = DEFAULT_X_MS_VERSION - self._USER_AGENT_STRING = DEFAULT_USER_AGENT_STRING - - def _update_user_agent_string(self, service_package_version): - self._USER_AGENT_STRING = '{}{} {}'.format(USER_AGENT_STRING_PREFIX, - service_package_version, - USER_AGENT_STRING_SUFFIX) - - @property - def socket_timeout(self): - return self._httpclient.timeout - - @socket_timeout.setter - def socket_timeout(self, value): - self._httpclient.timeout = value - - @property - def protocol(self): - return self._httpclient.protocol - - @protocol.setter - def protocol(self, value): - self._httpclient.protocol = value - - @property - def request_session(self): - return self._httpclient.session - - @request_session.setter - def request_session(self, value): - self._httpclient.session = value - - def set_proxy(self, host, port, user=None, password=None): - ''' - Sets the proxy server host and port for the HTTP CONNECT Tunnelling. - - :param str host: Address of the proxy. Ex: '192.168.0.100' - :param int port: Port of the proxy. Ex: 6000 - :param str user: User for proxy authorization. - :param str password: Password for proxy authorization. - ''' - self._httpclient.set_proxy(host, port, user, password) - - def _get_host_locations(self, primary=True, secondary=False): - locations = {} - if primary: - locations[LocationMode.PRIMARY] = self.primary_endpoint - if secondary: - locations[LocationMode.SECONDARY] = self.secondary_endpoint - return locations - - def _apply_host(self, request, operation_context, retry_context): - if operation_context.location_lock and operation_context.host_location: - # If this is a location locked operation and the location is set, - # override the request location and host_location. - request.host_locations = operation_context.host_location - request.host = list(operation_context.host_location.values())[0] - retry_context.location_mode = list(operation_context.host_location.keys())[0] - elif len(request.host_locations) == 1: - # If only one location is allowed, use that location. - request.host = list(request.host_locations.values())[0] - retry_context.location_mode = list(request.host_locations.keys())[0] - else: - # If multiple locations are possible, choose based on the location mode. 
- request.host = request.host_locations.get(self.location_mode) - retry_context.location_mode = self.location_mode - - @staticmethod - def extract_date_and_request_id(retry_context): - if getattr(retry_context, 'response', None) is None: - return "" - resp = retry_context.response - - if 'date' in resp.headers and 'x-ms-request-id' in resp.headers: - return str.format("Server-Timestamp={0}, Server-Request-ID={1}", - resp.headers['date'], resp.headers['x-ms-request-id']) - elif 'date' in resp.headers: - return str.format("Server-Timestamp={0}", resp.headers['date']) - elif 'x-ms-request-id' in resp.headers: - return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id']) - else: - return "" - - def _perform_request(self, request, parser=None, parser_args=None, operation_context=None): - ''' - Sends the request and return response. Catches HTTPError and hands it - to error handler - ''' - operation_context = operation_context or _OperationContext() - retry_context = RetryContext() - retry_context.is_emulated = self.is_emulated - - # Apply the appropriate host based on the location mode - self._apply_host(request, operation_context, retry_context) - - # Apply common settings to the request - _update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING) - client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id']) - - while True: - try: - try: - # Execute the request callback - if self.request_callback: - self.request_callback(request) - - # Add date and auth after the callback so date doesn't get too old and - # authentication is still correct if signed headers are added in the request - # callback. This also ensures retry policies with long back offs - # will work as it resets the time sensitive headers. 
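# Illustrative sketch (not part of this module) of the three callback hooks
# described in the class docstring and invoked by _perform_request: each hook
# takes a single argument and returns nothing. The header name and log
# messages below are placeholders, not values this package requires.
import logging

_example_log = logging.getLogger(__name__)

def example_request_callback(request):
    # Runs immediately before the request is dated, signed and sent.
    request.headers['x-ms-example-header'] = 'example-value'

def example_response_callback(response):
    # Runs immediately after each response is received.
    _example_log.info("response status=%s", response.status)

def example_retry_callback(retry_context):
    # Runs after retry evaluation; useful for surfacing retry counts.
    _example_log.info("retry evaluated: count=%s", getattr(retry_context, 'count', 0))

# Wiring (hypothetical client instance):
#     service.request_callback = example_request_callback
#     service.response_callback = example_response_callback
#     service.retry_callback = example_retry_callback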
- _add_date_header(request) - self.authentication.sign_request(request) - - # Set the request context - retry_context.request = request - - # Log the request before it goes out - logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.", - client_request_id_prefix, - request.method, - request.path, - request.query, - str(request.headers).replace('\n', '')) - - # Perform the request - response = self._httpclient.perform_request(request) - - # Execute the response callback - if self.response_callback: - self.response_callback(response) - - # Set the response context - retry_context.response = response - - # Log the response when it comes back - logger.info("%s Receiving Response: " - "%s, HTTP Status Code=%s, Message=%s, Headers=%s.", - client_request_id_prefix, - self.extract_date_and_request_id(retry_context), - response.status, - response.message, - str(request.headers).replace('\n', '')) - - # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException - if response.status >= 300: - # This exception will be caught by the general error handler - # and raised as an azure http exception - _http_error_handler( - HTTPError(response.status, response.message, response.headers, response.body)) - - # Parse the response - if parser: - if parser_args: - args = [response] - args.extend(parser_args) - return parser(*args) - else: - return parser(response) - else: - return - except AzureException as ex: - retry_context.exception = ex - raise ex - except Exception as ex: - retry_context.exception = ex - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - raise AzureException(ex.args[0]) - else: - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - msg = "" - if len(ex.args) > 0: - msg = ex.args[0] - raise AzureException('{}: {}'.format(ex.__class__.__name__, msg)) - - except AzureException as ex: - # only parse the strings used for logging if logging is at least enabled for CRITICAL - if logger.isEnabledFor(logging.CRITICAL): - exception_str_in_one_line = str(ex).replace('\n', '') - status_code = retry_context.response.status if retry_context.response is not None else 'Unknown' - timestamp_and_request_id = self.extract_date_and_request_id(retry_context) - - logger.info("%s Operation failed: checking if the operation should be retried. " - "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.", - client_request_id_prefix, - retry_context.count if hasattr(retry_context, 'count') else 0, - timestamp_and_request_id, - status_code, - exception_str_in_one_line) - - # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc) - # will not be resolved with retries. - if str(ex) == _ERROR_DECRYPTION_FAILURE: - logger.error("%s Encountered decryption failure: this cannot be retried. " - "%s, HTTP status code=%s, Exception=%s.", - client_request_id_prefix, - timestamp_and_request_id, - status_code, - exception_str_in_one_line) - raise ex - - # Determine whether a retry should be performed and if so, how - # long to wait before performing retry. 
- retry_interval = self.retry(retry_context) - if retry_interval is not None: - # Execute the callback - if self.retry_callback: - self.retry_callback(retry_context) - - logger.info( - "%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.", - client_request_id_prefix, - retry_context.count, - retry_interval) - - # Sleep for the desired retry interval - sleep(retry_interval) - else: - logger.error("%s Retry policy did not allow for a retry: " - "%s, HTTP status code=%s, Exception=%s.", - client_request_id_prefix, - timestamp_and_request_id, - status_code, - exception_str_in_one_line) - raise ex - finally: - # If this is a location locked operation and the location is not set, - # this is the first request of that operation. Set the location to - # be used for subsequent requests in the operation. - if operation_context.location_lock and not operation_context.host_location: - # note: to cover the emulator scenario, the host_location is grabbed - # from request.host_locations(which includes the dev account name) - # instead of request.host(which at this point no longer includes the dev account name) - operation_context.host_location = {retry_context.location_mode: request.host_locations[retry_context.location_mode]} diff --git a/azure/multiapi/storage/v2017_07_29/file/__init__.py b/azure/multiapi/storage/v2017_07_29/file/__init__.py deleted file mode 100644 index 464a949..0000000 --- a/azure/multiapi/storage/v2017_07_29/file/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from .fileservice import FileService -from .models import ( - Share, - ShareProperties, - File, - FileProperties, - Directory, - DirectoryProperties, - FileRange, - ContentSettings, - CopyProperties, - SharePermissions, - FilePermissions, - DeleteSnapshot, -) diff --git a/azure/multiapi/storage/v2017_07_29/file/_constants.py b/azure/multiapi/storage/v2017_07_29/file/_constants.py deleted file mode 100644 index 51dfcab..0000000 --- a/azure/multiapi/storage/v2017_07_29/file/_constants.py +++ /dev/null @@ -1,11 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -__author__ = 'Microsoft Corp. ' -__version__ = '1.1.0' - -# x-ms-version for storage service. -X_MS_VERSION = '2017-07-29' diff --git a/azure/multiapi/storage/v2017_07_29/file/_deserialization.py b/azure/multiapi/storage/v2017_07_29/file/_deserialization.py deleted file mode 100644 index e1e4ec4..0000000 --- a/azure/multiapi/storage/v2017_07_29/file/_deserialization.py +++ /dev/null @@ -1,241 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from dateutil import parser - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from .models import ( - Share, - Directory, - File, - FileProperties, - FileRange, - ShareProperties, - DirectoryProperties, -) -from ..common.models import ( - _list, -) -from ..common._deserialization import ( - _parse_properties, - _parse_metadata, -) -from ..common._error import _validate_content_match -from ..common._common_conversion import ( - _get_content_md5, - _to_str, -) - -def _parse_snapshot_share(response, name): - ''' - Extracts snapshot return header. - ''' - snapshot = response.headers.get('x-ms-snapshot') - - return _parse_share(response, name, snapshot) - -def _parse_share(response, name, snapshot=None): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, ShareProperties) - return Share(name, props, metadata, snapshot) - - -def _parse_directory(response, name): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, DirectoryProperties) - return Directory(name, props, metadata) - - -def _parse_file(response, name, validate_content=False): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, FileProperties) - - # For range gets, only look at 'x-ms-content-md5' for overall MD5 - content_settings = getattr(props, 'content_settings') - if 'content-range' in response.headers: - if 'x-ms-content-md5' in response.headers: - setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-content-md5'])) - else: - delattr(content_settings, 'content_md5') - - if validate_content: - computed_md5 = _get_content_md5(response.body) - _validate_content_match(response.headers['content-md5'], computed_md5) - - return File(name, response.body, props, metadata) - - -def _convert_xml_to_shares(response): - ''' - - - string-value - string-value - int-value - - - share-name - date-time-value - - date/time-value - etag - max-share-size - - - value - - - - marker-value - - ''' - if response is None or response.body is None: - return None - - shares = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(shares, 'next_marker', next_marker) - - shares_element = list_element.find('Shares') - - for share_element in shares_element.findall('Share'): - # Name element - share = Share() - share.name = share_element.findtext('Name') - - # Snapshot - share.snapshot = share_element.findtext('Snapshot') - - # Metadata - metadata_root_element = share_element.find('Metadata') - if metadata_root_element is not None: - share.metadata = dict() - for metadata_element in metadata_root_element: - share.metadata[metadata_element.tag] = metadata_element.text - - # Properties - properties_element = share_element.find('Properties') - share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified')) - share.properties.etag = properties_element.findtext('Etag') - share.properties.quota = int(properties_element.findtext('Quota')) - - # Add share to list - shares.append(share) - - return shares - - -def _convert_xml_to_directories_and_files(response): - ''' - - - string-value - int-value - - - file-name - - size-in-bytes - - - - directory-name - - - - - ''' - if response is None or 
response.body is None: - return None - - entries = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(entries, 'next_marker', next_marker) - - entries_element = list_element.find('Entries') - - for file_element in entries_element.findall('File'): - # Name element - file = File() - file.name = file_element.findtext('Name') - - # Properties - properties_element = file_element.find('Properties') - file.properties.content_length = int(properties_element.findtext('Content-Length')) - - # Add file to list - entries.append(file) - - for directory_element in entries_element.findall('Directory'): - # Name element - directory = Directory() - directory.name = directory_element.findtext('Name') - - # Add directory to list - entries.append(directory) - - return entries - - -def _convert_xml_to_ranges(response): - ''' - - - - Start Byte - End Byte - - - Start Byte - End Byte - - - ''' - if response is None or response.body is None: - return None - - ranges = list() - ranges_element = ETree.fromstring(response.body) - - for range_element in ranges_element.findall('Range'): - # Parse range - range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End'))) - - # Add range to list - ranges.append(range) - - return ranges - - -def _convert_xml_to_share_stats(response): - ''' - - - 15 - - ''' - if response is None or response.body is None: - return None - - share_stats_element = ETree.fromstring(response.body) - return int(share_stats_element.findtext('ShareUsage')) diff --git a/azure/multiapi/storage/v2017_07_29/file/_download_chunking.py b/azure/multiapi/storage/v2017_07_29/file/_download_chunking.py deleted file mode 100644 index 5eb2bdb..0000000 --- a/azure/multiapi/storage/v2017_07_29/file/_download_chunking.py +++ /dev/null @@ -1,107 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -import threading - -from ..common._error import _ERROR_NO_SINGLE_THREAD_CHUNKING - - -def _download_file_chunks(file_service, share_name, directory_name, file_name, - download_size, block_size, progress, start_range, end_range, - stream, max_connections, progress_callback, validate_content, - timeout, operation_context, snapshot): - if max_connections <= 1: - raise ValueError(_ERROR_NO_SINGLE_THREAD_CHUNKING.format('file')) - - downloader = _FileChunkDownloader( - file_service, - share_name, - directory_name, - file_name, - download_size, - block_size, - progress, - start_range, - end_range, - stream, - progress_callback, - validate_content, - timeout, - operation_context, - snapshot, - ) - - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - result = list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets())) - - -class _FileChunkDownloader(object): - def __init__(self, file_service, share_name, directory_name, file_name, - download_size, chunk_size, progress, start_range, end_range, - stream, progress_callback, validate_content, timeout, operation_context, snapshot): - self.file_service = file_service - self.share_name = share_name - self.directory_name = directory_name - self.file_name = file_name - self.chunk_size = chunk_size - - self.download_size = download_size - self.start_index = start_range - self.file_end = end_range - - self.stream = stream - self.stream_start = stream.tell() - self.stream_lock = threading.Lock() - self.progress_callback = progress_callback - self.progress_total = progress - self.progress_lock = threading.Lock() - self.validate_content = validate_content - self.timeout = timeout - self.operation_context = operation_context - self.snapshot = snapshot - - def get_chunk_offsets(self): - index = self.start_index - while index < self.file_end: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - if chunk_start + self.chunk_size > self.file_end: - chunk_end = self.file_end - else: - chunk_end = chunk_start + self.chunk_size - - chunk_data = self._download_chunk(chunk_start, chunk_end).content - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def _update_progress(self, length): - if self.progress_callback is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.download_size) - - def _write_to_stream(self, chunk_data, chunk_start): - with self.stream_lock: - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - return self.file_service._get_file( - self.share_name, - self.directory_name, - self.file_name, - start_range=chunk_start, - end_range=chunk_end - 1, - validate_content=self.validate_content, - timeout=self.timeout, - _context=self.operation_context, - snapshot=self.snapshot - ) diff --git a/azure/multiapi/storage/v2017_07_29/file/_serialization.py b/azure/multiapi/storage/v2017_07_29/file/_serialization.py deleted file mode 100644 index 03aecd1..0000000 --- a/azure/multiapi/storage/v2017_07_29/file/_serialization.py +++ /dev/null @@ -1,66 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
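# Illustrative, standalone sketch (not part of this module) of the chunking
# pattern used by _download_file_chunks above: split a byte range into
# fixed-size chunks and fan the per-chunk downloads out over a thread pool,
# mirroring how the real downloader maps process_chunk over
# get_chunk_offsets(). fetch_range is a placeholder for the range-GET issued
# for each chunk.
import concurrent.futures

def chunk_offsets(start, end, chunk_size):
    index = start
    while index < end:
        yield index
        index += chunk_size

def download_in_chunks(fetch_range, start, end, chunk_size, max_connections):
    # fetch_range(offset, length) -> bytes; stands in for the per-chunk GET.
    def process(offset):
        length = min(chunk_size, end - offset)
        return offset, fetch_range(offset, length)

    with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
        return dict(executor.map(process, chunk_offsets(start, end, chunk_size)))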
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ..common._common_conversion import _str -from ..common._error import ( - _validate_not_none, - _ERROR_START_END_NEEDED_FOR_MD5, - _ERROR_RANGE_TOO_LARGE_FOR_MD5, -) - - -def _get_path(share_name=None, directory_name=None, file_name=None): - ''' - Creates the path to access a file resource. - - share_name: - Name of share. - directory_name: - The path to the directory. - file_name: - Name of file. - ''' - if share_name and directory_name and file_name: - return '/{0}/{1}/{2}'.format( - _str(share_name), - _str(directory_name), - _str(file_name)) - elif share_name and directory_name: - return '/{0}/{1}'.format( - _str(share_name), - _str(directory_name)) - elif share_name and file_name: - return '/{0}/{1}'.format( - _str(share_name), - _str(file_name)) - elif share_name: - return '/{0}'.format(_str(share_name)) - else: - return '/' - - -def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False): - # If end range is provided, start range must be provided - if start_range_required or end_range is not None: - _validate_not_none('start_range', start_range) - if end_range_required: - _validate_not_none('end_range', end_range) - - # Format based on whether end_range is present - request.headers = request.headers or {} - if end_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5) - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5) - - request.headers['x-ms-range-get-content-md5'] = 'true' diff --git a/azure/multiapi/storage/v2017_07_29/file/_upload_chunking.py b/azure/multiapi/storage/v2017_07_29/file/_upload_chunking.py deleted file mode 100644 index c6fb34f..0000000 --- a/azure/multiapi/storage/v2017_07_29/file/_upload_chunking.py +++ /dev/null @@ -1,133 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
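# Illustrative, standalone sketch (not part of this module) of the range-header
# rules implemented by _validate_and_format_range_headers above: an inclusive
# start/end pair becomes "bytes=start-end", an open-ended range becomes
# "bytes=start-", and a per-range MD5 may only be requested for a complete
# range of at most 4MB.
def build_range_headers(start_range, end_range=None, check_content_md5=False):
    headers = {}
    if end_range is not None:
        headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
    else:
        headers['x-ms-range'] = 'bytes={0}-'.format(start_range)
    if check_content_md5:
        if end_range is None or end_range - start_range > 4 * 1024 * 1024:
            raise ValueError('content MD5 requires a complete range of at most 4MB')
        headers['x-ms-range-get-content-md5'] = 'true'
    return headers

# build_range_headers(0, 1023) -> {'x-ms-range': 'bytes=0-1023'}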
-# -------------------------------------------------------------------------- -import threading - - -def _upload_file_chunks(file_service, share_name, directory_name, file_name, - file_size, block_size, stream, max_connections, - progress_callback, validate_content, timeout): - uploader = _FileChunkUploader( - file_service, - share_name, - directory_name, - file_name, - file_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - timeout - ) - - if progress_callback is not None: - progress_callback(0, file_size) - - if max_connections > 1: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets())) - else: - if file_size is not None: - range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()] - else: - range_ids = uploader.process_all_unknown_size() - - return range_ids - - -class _FileChunkUploader(object): - def __init__(self, file_service, share_name, directory_name, file_name, - file_size, chunk_size, stream, parallel, progress_callback, - validate_content, timeout): - self.file_service = file_service - self.share_name = share_name - self.directory_name = directory_name - self.file_name = file_name - self.file_size = file_size - self.chunk_size = chunk_size - self.stream = stream - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - self.progress_callback = progress_callback - self.progress_total = 0 - self.progress_lock = threading.Lock() if parallel else None - self.validate_content = validate_content - self.timeout = timeout - - def get_chunk_offsets(self): - index = 0 - if self.file_size is None: - # we don't know the size of the stream, so we have no - # choice but to seek - while True: - data = self._read_from_stream(index, 1) - if not data: - break - yield index - index += self.chunk_size - else: - while index < self.file_size: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_offset): - size = self.chunk_size - if self.file_size is not None: - size = min(size, self.file_size - chunk_offset) - chunk_data = self._read_from_stream(chunk_offset, size) - return self._upload_chunk_with_progress(chunk_offset, chunk_data) - - def process_all_unknown_size(self): - assert self.stream_lock is None - range_ids = [] - index = 0 - while True: - data = self._read_from_stream(None, self.chunk_size) - if data: - index += len(data) - range_id = self._upload_chunk_with_progress(index, data) - range_ids.append(range_id) - else: - break - - return range_ids - - def _read_from_stream(self, offset, count): - if self.stream_lock is not None: - with self.stream_lock: - self.stream.seek(self.stream_start + offset) - data = self.stream.read(count) - else: - data = self.stream.read(count) - return data - - def _update_progress(self, length): - if self.progress_callback is not None: - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - else: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.file_size) - - def _upload_chunk_with_progress(self, chunk_start, chunk_data): - chunk_end = chunk_start + len(chunk_data) - 1 - self.file_service.update_range( - self.share_name, - self.directory_name, - self.file_name, - chunk_data, - chunk_start, - chunk_end, - self.validate_content, - timeout=self.timeout - ) - 
range_id = 'bytes={0}-{1}'.format(chunk_start, chunk_end) - self._update_progress(len(chunk_data)) - return range_id diff --git a/azure/multiapi/storage/v2017_07_29/file/fileservice.py b/azure/multiapi/storage/v2017_07_29/file/fileservice.py deleted file mode 100644 index 43efe2e..0000000 --- a/azure/multiapi/storage/v2017_07_29/file/fileservice.py +++ /dev/null @@ -1,2468 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys -from os import path - -from azure.common import AzureHttpError - -from ..common._auth import ( - _StorageSharedKeyAuthentication, - _StorageSASAuthentication, -) -from ..common._common_conversion import ( - _int_to_str, - _to_str, - _get_content_md5, -) -from ..common._connection import _ServiceParameters -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, - DEV_ACCOUNT_NAME, -) -from ..common._deserialization import ( - _convert_xml_to_service_properties, - _convert_xml_to_signed_identifiers, - _parse_metadata, - _parse_properties, - _parse_length_from_content_range, -) -from ..common._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _validate_type_bytes, - _ERROR_VALUE_NEGATIVE, - _ERROR_STORAGE_MISSING_INFO, - _ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES, - _ERROR_PARALLEL_NOT_SEEKABLE, - _validate_access_policies, -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_request_body, - _get_data_bytes_only, - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, - _add_metadata_headers, -) -from ..common.models import ( - Services, - ListGenerator, - _OperationContext, -) -from .sharedaccesssignature import ( - FileSharedAccessSignature, -) -from ..common.storageclient import StorageClient -from ._deserialization import ( - _convert_xml_to_shares, - _convert_xml_to_directories_and_files, - _convert_xml_to_ranges, - _convert_xml_to_share_stats, - _parse_file, - _parse_share, - _parse_snapshot_share, - _parse_directory, -) -from ._download_chunking import _download_file_chunks -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from ._upload_chunking import _upload_file_chunks -from .models import ( - FileProperties, -) - -from ._constants import ( - X_MS_VERSION, - __version__ as package_version, -) - -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - - -class FileService(StorageClient): - ''' - The Server Message Block (SMB) protocol is the preferred file share protocol - used on premise today. The Microsoft Azure File service enables customers to - leverage the availability and scalability of Azure's Cloud Infrastructure as - a Service (IaaS) SMB without having to rewrite SMB client applications. - - The Azure File service also offers a compelling alternative to traditional - Direct Attached Storage (DAS) and Storage Area Network (SAN) solutions, which - are often complex and expensive to install, configure, and operate. - - :ivar int MAX_SINGLE_GET_SIZE: - The size of the first range get performed by get_file_to_* methods if - max_connections is greater than 1. Less data will be returned if the - file is smaller than this. 
- :ivar int MAX_CHUNK_GET_SIZE: - The size of subsequent range gets performed by get_file_to_* methods if - max_connections is greater than 1 and the file is larger than MAX_SINGLE_GET_SIZE. - Less data will be returned if the remainder of the file is smaller than - this. If this is set to larger than 4MB, content_validation will throw an - error if enabled. However, if content_validation is not desired a size - greater than 4MB may be optimal. Setting this below 4MB is not recommended. - :ivar int MAX_RANGE_SIZE: - The size of the ranges put by create_file_from_* methods. Smaller ranges - may be put if there is less data provided. The maximum range size the service - supports is 4MB. - ''' - MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024 - MAX_CHUNK_GET_SIZE = 8 * 1024 * 1024 - MAX_RANGE_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - service_params = _ServiceParameters.get_service_parameters( - 'file', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(FileService, self).__init__(service_params) - - if self.account_name == DEV_ACCOUNT_NAME: - raise ValueError(_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - self._X_MS_VERSION = X_MS_VERSION - self._update_user_agent_string(package_version) - - def make_file_url(self, share_name, directory_name, file_name, - protocol=None, sas_token=None): - ''' - Creates the url to access a file. - - :param str share_name: - Name of share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file. - :param str protocol: - Protocol to use: 'http' or 'https'. 
If not specified, uses the - protocol specified when FileService was initialized. - :param str sas_token: - Shared access signature token created with - generate_shared_access_signature. - :return: file access URL. - :rtype: str - ''' - - if directory_name is None: - url = '{}://{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - share_name, - file_name, - ) - else: - url = '{}://{}/{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - share_name, - directory_name, - file_name, - ) - - if sas_token: - url += '?' + sas_token - - return url - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the file service. - Use the returned signature with the sas_token parameter of the FileService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = FileSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.FILE, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_share_shared_access_signature(self, share_name, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, - protocol=None, - cache_control=None, - content_disposition=None, - content_encoding=None, - content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. 
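# Illustrative sketch (not part of this module) of constructing a FileService
# and building a file URL with make_file_url as defined above. The import path
# is an assumption based on the classic azure-storage-file package; the account
# name, key, share and file names are placeholders.
from azure.storage.file import FileService  # assumed import path

def build_file_url():
    service = FileService(account_name='mystorageaccount',
                          account_key='<base64-account-key>')
    # A file directly under the share root (directory_name=None takes the
    # three-segment URL form shown above).
    return service.make_file_url('myshare', None, 'report.csv', sas_token=None)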
- - :param str share_name: - Name of share. - :param SharePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_share_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = FileSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_share( - share_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def generate_file_shared_access_signature(self, share_name, - directory_name=None, - file_name=None, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, - protocol=None, - cache_control=None, - content_disposition=None, - content_encoding=None, - content_language=None, - content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param FilePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. 
- :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = FileSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_file( - share_name, - directory_name, - file_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def set_file_service_properties(self, hour_metrics=None, minute_metrics=None, - cors=None, timeout=None): - ''' - Sets the properties of a storage account's File service, including - Azure Storage Analytics. If an element (ex HourMetrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param Metrics hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :param Metrics minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.common.models.CorsRule`) - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(None, hour_metrics, minute_metrics, cors)) - - self._perform_request(request) - - def get_file_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's File service, including - Azure Storage Analytics. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The file service properties. - :rtype: - :class:`~azure.storage.common.models.ServiceProperties` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def list_shares(self, prefix=None, marker=None, num_results=None, - include_metadata=False, timeout=None, include_snapshots=False): - ''' - Returns a generator to list the shares under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned or num_results - is reached. 
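# Illustrative sketch (not part of this module) of issuing a read-only,
# time-limited SAS for a single file with generate_file_shared_access_signature
# above, then embedding it in a URL via make_file_url. The import path is an
# assumption based on the classic azure-storage-file package; share, directory
# and file names are placeholders, as is the one-hour expiry.
from datetime import datetime, timedelta

from azure.storage.file import FilePermissions  # assumed import path

def read_only_file_url(service, share, directory, file_name):
    token = service.generate_file_shared_access_signature(
        share,
        directory_name=directory,
        file_name=file_name,
        permission=FilePermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))
    return service.make_file_url(share, directory, file_name, sas_token=token)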
- - If num_results is specified and the account has more than that number of - shares, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only shares whose names - begin with the specified prefix. - :param int num_results: - Specifies the maximum number of shares to return. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - :param bool include_snapshots: - Specifies that share snapshots be returned in the response. - ''' - include = 'snapshots' if include_snapshots else None - if include_metadata: - if include is not None: - include = include + ',metadata' - else: - include = 'metadata' - operation_context = _OperationContext(location_lock=True) - kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, - 'include': include, 'timeout': timeout, '_context': operation_context} - resp = self._list_shares(**kwargs) - - return ListGenerator(resp, self._list_shares, (), kwargs) - - def _list_shares(self, prefix=None, marker=None, max_results=None, - include=None, timeout=None, _context=None): - ''' - Returns a list of the shares under the specified account. - - :param str prefix: - Filters the results to return only shares whose names - begin with the specified prefix. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of shares to return. A single list - request may return up to 1000 shares and potentially a continuation - token which should be followed to get additional resutls. - :param string include: - Include this parameter to specify that either the share's - metadata, snapshots or both be returned as part of the response body. set this - parameter to string 'metadata' to get share's metadata. set this parameter to 'snapshots' - to get all the share snapshots. for both use 'snapshots,metadata'. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_shares, operation_context=_context) - - def create_share(self, share_name, metadata=None, quota=None, - fail_on_exist=False, timeout=None): - ''' - Creates a new share under the specified account. If the share - with the same name already exists, the operation fails on the - service. By default, the exception is swallowed by the client. 
- To expose the exception, specify True for fail_on_exists. - - :param str share_name: - Name of share to create. - :param metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :type metadata: dict(str, str) - :param int quota: - Specifies the maximum size of the share, in gigabytes. Must be - greater than 0, and less than or equal to 5TB (5120). - :param bool fail_on_exist: - Specify whether to throw an exception when the share exists. - False by default. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if share is created, False if share already exists. - :rtype: bool - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-share-quota': _int_to_str(quota) - } - _add_metadata_headers(metadata, request) - - if not fail_on_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def snapshot_share(self, share_name, metadata=None, quota=None, timeout=None): - ''' - Creates a snapshot of an existing share under the specified account. - - :param str share_name: - The name of the share to create a snapshot of. - :param metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :type metadata: a dict of str to str: - :param int quota: - Specifies the maximum size of the share, in gigabytes. Must be - greater than 0, and less than or equal to 5TB (5120). - :param int timeout: - The timeout parameter is expressed in seconds. - :return: snapshot properties - :rtype: azure.storage.file.models.Share - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'snapshot', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-share-quota': _int_to_str(quota) - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_snapshot_share, [share_name]) - - def get_share_properties(self, share_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A Share that exposes properties and metadata. - :rtype: :class:`~azure.storage.file.models.Share` - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - - return self._perform_request(request, _parse_share, [share_name]) - - def set_share_properties(self, share_name, quota, timeout=None): - ''' - Sets service-defined properties for the specified share. 
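# Illustrative sketch (not part of this module) tying together create_share and
# the list_shares generator documented above: create a 5 GiB share carrying one
# metadata pair, then lazily enumerate shares together with their metadata.
# ``service`` is assumed to be a FileService instance; names and values are
# placeholders.
def create_and_list(service):
    created = service.create_share('logs', metadata={'Category': 'test'},
                                   quota=5, fail_on_exist=False)
    shares = {}
    # The generator follows continuation tokens for us (see list_shares above).
    for share in service.list_shares(prefix='log', include_metadata=True):
        shares[share.name] = share.metadata
    return created, shares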
- - :param str share_name: - Name of existing share. - :param int quota: - Specifies the maximum size of the share, in gigabytes. Must be - greater than 0, and less than or equal to 5 TB (5120 GB). - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('quota', quota) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-share-quota': _int_to_str(quota) - } - - self._perform_request(request) - - def get_share_metadata(self, share_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata for the specified share. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: - A dictionary representing the share metadata name, value pairs. - :rtype: dict(str, str) - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot), - } - - return self._perform_request(request, _parse_metadata) - - def set_share_metadata(self, share_name, metadata=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - share. Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param str share_name: - Name of existing share. - :param metadata: - A dict containing name-value pairs to associate with the share as - metadata. Example: {'category':'test'} - :type metadata: dict(str, str) - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def get_share_acl(self, share_name, timeout=None): - ''' - Gets the permissions for the specified share. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A dictionary of access policies associated with the share. - :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_signed_identifiers) - - def set_share_acl(self, share_name, signed_identifiers=None, timeout=None): - ''' - Sets the permissions for the specified share or stored access - policies that may be used with Shared Access Signatures. 
- - :param str share_name: - Name of existing share. - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - - self._perform_request(request) - - def get_share_stats(self, share_name, timeout=None): - ''' - Gets the approximate size of the data stored on the share, - rounded up to the nearest gigabyte. - - Note that this value may not include all recently created - or recently resized files. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the approximate size of the data stored on the share. - :rtype: int - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_share_stats) - - def delete_share(self, share_name, fail_not_exist=False, timeout=None, snapshot=None, delete_snapshots=None): - ''' - Marks the specified share for deletion. If the share - does not exist, the operation fails on the service. By - default, the exception is swallowed by the client. - To expose the exception, specify True for fail_not_exist. - - :param str share_name: - Name of share to delete. - :param bool fail_not_exist: - Specify whether to throw an exception when the share doesn't - exist. False by default. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - Specify this argument to delete a specific snapshot only. - delete_snapshots must be None if this is specified. - :param ~azure.storage.file.models.DeleteSnapshot delete_snapshots: - To delete a share that has snapshots, this must be specified as DeleteSnapshot.Include. - :return: True if share is deleted, False share doesn't exist. - :rtype: bool - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.headers = { - 'x-ms-delete-snapshots': _to_str(delete_snapshots) - } - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot), - } - - if not fail_not_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def create_directory(self, share_name, directory_name, metadata=None, - fail_on_exist=False, timeout=None): - ''' - Creates a new directory under the specified share or parent directory. 
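# Illustrative sketch (not part of this module) of attaching a stored access
# policy to a share with set_share_acl above and then referencing it by id when
# creating a share-level SAS. The AccessPolicy and SharePermissions import
# paths are assumptions based on the classic SDK layout; the policy id and
# expiry are placeholders.
from datetime import datetime, timedelta

from azure.storage.common.models import AccessPolicy  # assumed import path
from azure.storage.file import SharePermissions        # assumed import path

def grant_read_via_policy(service, share_name):
    policy = AccessPolicy(permission=SharePermissions.READ,
                          expiry=datetime.utcnow() + timedelta(days=7))
    # A share may carry at most five stored access policies (see above).
    service.set_share_acl(share_name, {'read-only-week': policy})
    # The SAS then only needs to reference the stored policy id.
    return service.generate_share_shared_access_signature(share_name,
                                                          id='read-only-week')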
- If the directory with the same name already exists, the operation fails
- on the service. By default, the exception is swallowed by the client.
- To expose the exception, specify True for fail_on_exist.
-
- :param str share_name:
- Name of existing share.
- :param str directory_name:
- Name of directory to create, including the path to the parent
- directory.
- :param metadata:
- A dict with name-value pairs to associate with the
- directory as metadata. Example: {'Category':'test'}
- :type metadata: dict(str, str)
- :param bool fail_on_exist:
- Specify whether to throw an exception when the directory exists.
- False by default.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- :return: True if directory is created, False if directory already exists.
- :rtype: bool
- '''
- _validate_not_none('share_name', share_name)
- _validate_not_none('directory_name', directory_name)
- request = HTTPRequest()
- request.method = 'PUT'
- request.host_locations = self._get_host_locations()
- request.path = _get_path(share_name, directory_name)
- request.query = {
- 'restype': 'directory',
- 'timeout': _int_to_str(timeout),
- }
- _add_metadata_headers(metadata, request)
-
- if not fail_on_exist:
- try:
- self._perform_request(request)
- return True
- except AzureHttpError as ex:
- _dont_fail_on_exist(ex)
- return False
- else:
- self._perform_request(request)
- return True
-
- def delete_directory(self, share_name, directory_name,
- fail_not_exist=False, timeout=None):
- '''
- Deletes the specified empty directory. Note that the directory must
- be empty before it can be deleted. Attempting to delete directories
- that are not empty will fail.
-
- If the directory does not exist, the operation fails on the
- service. By default, the exception is swallowed by the client.
- To expose the exception, specify True for fail_not_exist.
-
- :param str share_name:
- Name of existing share.
- :param str directory_name:
- Name of directory to delete, including the path to the parent
- directory.
- :param bool fail_not_exist:
- Specify whether to throw an exception when the directory doesn't
- exist.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- :return: True if directory is deleted, False otherwise.
- :rtype: bool
- '''
- _validate_not_none('share_name', share_name)
- _validate_not_none('directory_name', directory_name)
- request = HTTPRequest()
- request.method = 'DELETE'
- request.host_locations = self._get_host_locations()
- request.path = _get_path(share_name, directory_name)
- request.query = {
- 'restype': 'directory',
- 'timeout': _int_to_str(timeout),
- }
-
- if not fail_not_exist:
- try:
- self._perform_request(request)
- return True
- except AzureHttpError as ex:
- _dont_fail_not_exist(ex)
- return False
- else:
- self._perform_request(request)
- return True
-
- def get_directory_properties(self, share_name, directory_name, timeout=None, snapshot=None):
- '''
- Returns all user-defined metadata and system properties for the
- specified directory. The data returned does not include the directory's
- list of files.
-
- :param str share_name:
- Name of existing share.
- :param str directory_name:
- The path to an existing directory.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- :return: properties for the specified directory within a directory object.
- :param str snapshot:
- A string that represents the snapshot version, if applicable.
- :rtype: :class:`~azure.storage.file.models.Directory` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - - return self._perform_request(request, _parse_directory, [directory_name]) - - def get_directory_metadata(self, share_name, directory_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata for the specified directory. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: - A dictionary representing the directory metadata name, value pairs. - :rtype: dict(str, str) - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - - return self._perform_request(request, _parse_metadata) - - def set_directory_metadata(self, share_name, directory_name, metadata=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - directory. Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with no metadata dict. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param metadata: - A dict containing name-value pairs to associate with the directory - as metadata. Example: {'category':'test'} - :type metadata: dict(str, str). - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def list_directories_and_files(self, share_name, directory_name=None, - num_results=None, marker=None, timeout=None, - prefix=None, snapshot=None): - - ''' - Returns a generator to list the directories and files under the specified share. - The generator will lazily follow the continuation tokens returned by - the service and stop when all directories and files have been returned or - num_results is reached. - - If num_results is specified and the share has more than that number of - files and directories, the generator will have a populated next_marker - field once it finishes. This marker can be used to create a new generator - if more results are desired. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. 
- :param int num_results: - Specifies the maximum number of files to return, - including all directory elements. If the request does not specify - num_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting num_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str prefix: - List only the files and/or directories with the given prefix. - :param str snapshot: - A string that represents the snapshot version, if applicable. - ''' - operation_context = _OperationContext(location_lock=True) - args = (share_name, directory_name) - kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout, - '_context': operation_context, 'prefix': prefix, 'snapshot': snapshot} - - resp = self._list_directories_and_files(*args, **kwargs) - - return ListGenerator(resp, self._list_directories_and_files, args, kwargs) - - def _list_directories_and_files(self, share_name, directory_name=None, - marker=None, max_results=None, timeout=None, - prefix=None, _context=None, snapshot=None): - ''' - Returns a list of the directories and files under the specified share. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of files to return, - including all directory elements. If the request does not specify - max_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting max_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param int timeout: - The timeout parameter is expressed in seconds. - :param str prefix: - List only the files and/or directories with the given prefix. - :param str snapshot: - A string that represents the snapshot version, if applicable. - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - - return self._perform_request(request, _convert_xml_to_directories_and_files, - operation_context=_context) - - def get_file_properties(self, share_name, directory_name, file_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. 
Returns an instance of :class:`~azure.storage.file.models.File` with
- :class:`~azure.storage.file.models.FileProperties` and a metadata dict.
-
- :param str share_name:
- Name of existing share.
- :param str directory_name:
- The path to the directory.
- :param str file_name:
- Name of existing file.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- :param str snapshot:
- A string that represents the snapshot version, if applicable.
- :return: a file object including properties and metadata.
- :rtype: :class:`~azure.storage.file.models.File`
- '''
- _validate_not_none('share_name', share_name)
- _validate_not_none('file_name', file_name)
- request = HTTPRequest()
- request.method = 'HEAD'
- request.host_locations = self._get_host_locations()
- request.path = _get_path(share_name, directory_name, file_name)
- request.query = { 'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)}
-
- return self._perform_request(request, _parse_file, [file_name])
-
- def exists(self, share_name, directory_name=None, file_name=None, timeout=None, snapshot=None):
- '''
- Returns a boolean indicating whether the share exists if only share name is
- given. If directory_name is specified, a boolean will be returned indicating
- if the directory exists. If file_name is specified as well, a boolean will be
- returned indicating if the file exists.
-
- :param str share_name:
- Name of a share.
- :param str directory_name:
- The path to a directory.
- :param str file_name:
- Name of a file.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- :param str snapshot:
- A string that represents the snapshot version, if applicable.
- :return: A boolean indicating whether the resource exists.
- :rtype: bool
- '''
- _validate_not_none('share_name', share_name)
- try:
- if file_name is not None:
- self.get_file_properties(share_name, directory_name, file_name, timeout=timeout, snapshot=snapshot)
- elif directory_name is not None:
- self.get_directory_properties(share_name, directory_name, timeout=timeout, snapshot=snapshot)
- else:
- self.get_share_properties(share_name, timeout=timeout, snapshot=snapshot)
- return True
- except AzureHttpError as ex:
- _dont_fail_not_exist(ex)
- return False
-
- def resize_file(self, share_name, directory_name,
- file_name, content_length, timeout=None):
- '''
- Resizes a file to the specified size. If the specified byte
- value is less than the current size of the file, then all
- ranges above the specified byte value are cleared.
-
- :param str share_name:
- Name of existing share.
- :param str directory_name:
- The path to the directory.
- :param str file_name:
- Name of existing file.
- :param int content_length:
- The length to resize the file to.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- '''
- _validate_not_none('share_name', share_name)
- _validate_not_none('file_name', file_name)
- _validate_not_none('content_length', content_length)
- request = HTTPRequest()
- request.method = 'PUT'
- request.host_locations = self._get_host_locations()
- request.path = _get_path(share_name, directory_name, file_name)
- request.query = {
- 'comp': 'properties',
- 'timeout': _int_to_str(timeout),
- }
- request.headers = {
- 'x-ms-content-length': _to_str(content_length)
- }
-
- self._perform_request(request)
-
- def set_file_properties(self, share_name, directory_name, file_name,
- content_settings, timeout=None):
- '''
- Sets system properties on the file.
If one property is set for the
- content_settings, all properties will be overridden.
-
- :param str share_name:
- Name of existing share.
- :param str directory_name:
- The path to the directory.
- :param str file_name:
- Name of existing file.
- :param ~azure.storage.file.models.ContentSettings content_settings:
- ContentSettings object used to set the file properties.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- '''
- _validate_not_none('share_name', share_name)
- _validate_not_none('file_name', file_name)
- _validate_not_none('content_settings', content_settings)
- request = HTTPRequest()
- request.method = 'PUT'
- request.host_locations = self._get_host_locations()
- request.path = _get_path(share_name, directory_name, file_name)
- request.query = {
- 'comp': 'properties',
- 'timeout': _int_to_str(timeout),
- }
- request.headers = content_settings._to_headers()
-
- self._perform_request(request)
-
- def get_file_metadata(self, share_name, directory_name, file_name, timeout=None, snapshot=None):
- '''
- Returns all user-defined metadata for the specified file.
-
- :param str share_name:
- Name of existing share.
- :param str directory_name:
- The path to the directory.
- :param str file_name:
- Name of existing file.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- :param str snapshot:
- A string that represents the snapshot version, if applicable.
- :return:
- A dictionary representing the file metadata name, value pairs.
- :rtype: dict(str, str)
- '''
- _validate_not_none('share_name', share_name)
- _validate_not_none('file_name', file_name)
- request = HTTPRequest()
- request.method = 'GET'
- request.host_locations = self._get_host_locations()
- request.path = _get_path(share_name, directory_name, file_name)
- request.query = {
- 'comp': 'metadata',
- 'timeout': _int_to_str(timeout),
- 'sharesnapshot': _to_str(snapshot),
- }
-
- return self._perform_request(request, _parse_metadata)
-
- def set_file_metadata(self, share_name, directory_name,
- file_name, metadata=None, timeout=None):
- '''
- Sets user-defined metadata for the specified file as one or more
- name-value pairs.
-
- :param str share_name:
- Name of existing share.
- :param str directory_name:
- The path to the directory.
- :param str file_name:
- Name of existing file.
- :param metadata:
- Dict containing name and value pairs. Each call to this operation
- replaces all existing metadata attached to the file. To remove all
- metadata from the file, call this operation with no metadata headers.
- :type metadata: dict(str, str)
- :param int timeout:
- The timeout parameter is expressed in seconds.
- '''
- _validate_not_none('share_name', share_name)
- _validate_not_none('file_name', file_name)
- request = HTTPRequest()
- request.method = 'PUT'
- request.host_locations = self._get_host_locations()
- request.path = _get_path(share_name, directory_name, file_name)
- request.query = {
- 'comp': 'metadata',
- 'timeout': _int_to_str(timeout),
- }
- _add_metadata_headers(metadata, request)
-
- self._perform_request(request)
-
- def copy_file(self, share_name, directory_name, file_name, copy_source,
- metadata=None, timeout=None):
- '''
- Copies a file asynchronously. This operation returns a copy operation
- properties object, including a copy ID you can use to check or abort the
- copy operation. The File service copies files on a best-effort basis.
-
- If the destination file exists, it will be overwritten.
The destination - file cannot be modified while the copy operation is in progress. - - :param str share_name: - Name of the destination share. The share must exist. - :param str directory_name: - Name of the destination directory. The directory must exist. - :param str file_name: - Name of the destination file. If the destination file exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param metadata: - Name-value pairs associated with the file as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination file. If one or more name-value - pairs are specified, the destination file is created with the specified - metadata, and the metadata is not copied from the source blob or file. - :type metadata: dict(str, str). - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.file.models.CopyProperties` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('copy_source', copy_source) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-copy-source': _to_str(copy_source), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_properties, [FileProperties]).copy - - def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None): - ''' - Aborts a pending copy_file operation, and leaves a destination file - with zero length and full metadata. - - :param str share_name: - Name of destination share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of destination file. - :param str copy_id: - Copy identifier provided in the copy.id of the original - copy_file operation. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('copy_id', copy_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'copy', - 'copyid': _to_str(copy_id), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-copy-action': 'abort', - } - - self._perform_request(request) - - def delete_file(self, share_name, directory_name, file_name, timeout=None): - ''' - Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. 
- :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = {'timeout': _int_to_str(timeout)} - - self._perform_request(request) - - def create_file(self, share_name, directory_name, file_name, - content_length, content_settings=None, metadata=None, - timeout=None): - ''' - Creates a new file. - - See create_file_from_* for high level functions that handle the - creation and upload of large files with automatic chunking and - progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param int content_length: - Length of the file in bytes. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-content-length': _to_str(content_length), - 'x-ms-type': 'file' - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - self._perform_request(request) - - def create_file_from_path(self, share_name, directory_name, file_name, - local_file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, timeout=None): - ''' - Creates a new azure file from a local file path, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str local_file_path: - Path of the local file to upload as the file content. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used for setting file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. 
- :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('local_file_path', local_file_path) - - count = path.getsize(local_file_path) - with open(local_file_path, 'rb') as stream: - self.create_file_from_stream( - share_name, directory_name, file_name, stream, - count, content_settings, metadata, validate_content, progress_callback, - max_connections, timeout) - - def create_file_from_text(self, share_name, directory_name, file_name, - text, encoding='utf-8', content_settings=None, - metadata=None, validate_content=False, timeout=None): - ''' - Creates a new file from str/unicode, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str text: - Text to upload to the file. - :param str encoding: - Python encoding to use to convert the text to bytes. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('text', text) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - self.create_file_from_bytes( - share_name, directory_name, file_name, text, count=len(text), - content_settings=content_settings, metadata=metadata, - validate_content=validate_content, timeout=timeout) - - def create_file_from_bytes( - self, share_name, directory_name, file_name, file, - index=0, count=None, content_settings=None, metadata=None, - validate_content=False, progress_callback=None, max_connections=2, - timeout=None): - ''' - Creates a new file from an array of bytes, or updates the content - of an existing file, with automatic chunking and progress - notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str file: - Content of file as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. 
- :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('file', file) - _validate_type_bytes('file', file) - - if index < 0: - raise TypeError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(file) - index - - stream = BytesIO(file) - stream.seek(index) - - self.create_file_from_stream( - share_name, directory_name, file_name, stream, count, - content_settings, metadata, validate_content, progress_callback, - max_connections, timeout) - - def create_file_from_stream( - self, share_name, directory_name, file_name, stream, count, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None): - ''' - Creates a new file from a file/stream, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param io.IOBase stream: - Opened file/stream to upload as the file content. - :param int count: - Number of bytes to read from the stream. This is required, a - file cannot be created if the count is unknown. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. Note that parallel upload - requires the stream to be seekable. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
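-
- Example (a minimal illustrative sketch; the import path, account
- credentials, and share name below are placeholders)::
-
- from io import BytesIO
- from azure.multiapi.storage.v2017_07_29.file import FileService
-
- service = FileService(account_name='myaccount', account_key='<key>')
- data = b'hello, file share'
- service.create_file_from_stream(
- 'myshare', None, 'hello.txt', BytesIO(data), count=len(data))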
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('stream', stream) - _validate_not_none('count', count) - - if count < 0: - raise TypeError(_ERROR_VALUE_NEGATIVE.format('count')) - - self.create_file( - share_name, - directory_name, - file_name, - count, - content_settings, - metadata, - timeout - ) - - _upload_file_chunks( - self, - share_name, - directory_name, - file_name, - count, - self.MAX_RANGE_SIZE, - stream, - max_connections, - progress_callback, - validate_content, - timeout - ) - - def _get_file(self, share_name, directory_name, file_name, - start_range=None, end_range=None, validate_content=False, - timeout=None, _context=None, snapshot=None): - ''' - Downloads a file's content, metadata, and properties. You can specify a - range if you don't need to download the file in its entirety. If no range - is specified, the full file will be downloaded. - - See get_file_to_* for high level functions that handle the download - of large files with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - When this is set to True and specified together with the Range header, - the service returns the MD5 hash for the range, as long as the range - is less than or equal to 4 MB in size. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with content, properties, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { 'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)} - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - check_content_md5=validate_content) - - return self._perform_request(request, _parse_file, - [file_name, validate_content], - operation_context=_context) - - def get_file_to_path(self, share_name, directory_name, file_name, file_path, - open_mode='wb', start_range=None, end_range=None, - validate_content=False, progress_callback=None, - max_connections=2, timeout=None, snapshot=None): - ''' - Downloads a file to a file path, with automatic chunking and progress - notifications. Returns an instance of File with properties and metadata. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param str file_path: - Path of file to write to. 
- :param str open_mode: - Mode to use when opening the file. Note that specifying append only - open_mode prevents parallel download. So, max_connections must be set - to 1 if this open_mode is used. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with properties and metadata. 
- :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('file_path', file_path) - _validate_not_none('open_mode', open_mode) - - if max_connections > 1 and 'a' in open_mode: - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - with open(file_path, open_mode) as stream: - file = self.get_file_to_stream( - share_name, directory_name, file_name, stream, - start_range, end_range, validate_content, - progress_callback, max_connections, timeout, snapshot) - - return file - - def get_file_to_stream( - self, share_name, directory_name, file_name, stream, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None, snapshot=None): - ''' - Downloads a file to a stream, with automatic chunking and progress - notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties - and metadata. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param io.IOBase stream: - Opened file/stream to write to. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. 
- :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with properties and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('stream', stream) - - # If the user explicitly sets max_connections to 1, do a single shot download - if max_connections == 1: - file = self._get_file(share_name, - directory_name, - file_name, - start_range=start_range, - end_range=end_range, - validate_content=validate_content, - timeout=timeout, - snapshot=snapshot) - - # Set the download size - download_size = file.properties.content_length - - # If max_connections is greater than 1, do the first get to establish the - # size of the file and get the first segment of data - else: - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE - - initial_request_start = start_range if start_range is not None else 0 - - if end_range is not None and end_range - start_range < first_get_size: - initial_request_end = end_range - else: - initial_request_end = initial_request_start + first_get_size - 1 - - # Send a context object to make sure we always retry to the initial location - operation_context = _OperationContext(location_lock=True) - try: - file = self._get_file(share_name, - directory_name, - file_name, - start_range=initial_request_start, - end_range=initial_request_end, - validate_content=validate_content, - timeout=timeout, - _context=operation_context, - snapshot=snapshot) - - # Parse the total file size and adjust the download size if ranges - # were specified - file_size = _parse_length_from_content_range(file.properties.content_range) - if end_range is not None: - # Use the end_range unless it is over the end of the file - download_size = min(file_size, end_range - start_range + 1) - elif start_range is not None: - download_size = file_size - start_range - else: - download_size = file_size - except AzureHttpError as ex: - if start_range is None and ex.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - file = self._get_file(share_name, - directory_name, - file_name, - validate_content=validate_content, - timeout=timeout, - _context=operation_context, - snapshot=snapshot) - - # Set the download size to empty - download_size = 0 - else: - raise ex - - # Mark the first progress chunk. If the file is small or this is a single - # shot download, this is the only call - if progress_callback: - progress_callback(file.properties.content_length, download_size) - - # Write the content to the user stream - # Clear file content since output has been written to user stream - if file.content is not None: - stream.write(file.content) - file.content = None - - # If the file is small or single shot download was used, the download is - # complete at this point. If file size is large, use parallel download. 
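- #
- # Illustrative arithmetic for the parallel branch below (sizes hypothetical;
- # the real limits are self.MAX_SINGLE_GET_SIZE and self.MAX_CHUNK_GET_SIZE):
- # with a 32 MiB first request and 4 MiB chunks, a 100 MiB file is fetched as
- # bytes 0-33554431 up front, then parallel range GETs for 33554432-37748735,
- # 37748736-41943039, and so on to the end of the file, spread across
- # max_connections workers.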
- if file.properties.content_length != download_size: - # At this point would like to lock on something like the etag so that - # if the file is modified, we dont get a corrupted download. However, - # this feature is not yet available on the file service. - - end_file = file_size - if end_range is not None: - # Use the end_range unless it is over the end of the file - end_file = min(file_size, end_range + 1) - - _download_file_chunks( - self, - share_name, - directory_name, - file_name, - download_size, - self.MAX_CHUNK_GET_SIZE, - first_get_size, - initial_request_end + 1, # start where the first download ended - end_file, - stream, - max_connections, - progress_callback, - validate_content, - timeout, - operation_context, - snapshot - ) - - # Set the content length to the download size instead of the size of - # the last range - file.properties.content_length = download_size - - # Overwrite the content range to the user requested range - file.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, file_size) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - file.properties.content_md5 = None - - return file - - def get_file_to_bytes(self, share_name, directory_name, file_name, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None, snapshot=None): - ''' - Downloads a file as an array of bytes, with automatic chunking and - progress notifications. Returns an instance of :class:`~azure.storage.file.models.File` with - properties, metadata, and content. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. 
If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with properties, content, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - - stream = BytesIO() - file = self.get_file_to_stream( - share_name, - directory_name, - file_name, - stream, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - timeout, - snapshot) - - file.content = stream.getvalue() - return file - - def get_file_to_text( - self, share_name, directory_name, file_name, encoding='utf-8', - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None, snapshot=None): - ''' - Downloads a file as unicode text, with automatic chunking and progress - notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties, - metadata, and content. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param str encoding: - Python encoding to use when decoding the file data. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. 
- :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with properties, content, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('encoding', encoding) - - file = self.get_file_to_bytes( - share_name, - directory_name, - file_name, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - timeout, - snapshot) - - file.content = file.content.decode(encoding) - return file - - def update_range(self, share_name, directory_name, file_name, data, - start_range, end_range, validate_content=False, timeout=None): - ''' - Writes the bytes specified by the request body into the specified range. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param bytes data: - Content of the range. - :param int start_range: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :param int timeout: - The timeout parameter is expressed in seconds. 
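-
- Example (an illustrative sketch; the service object, share, directory,
- and file are placeholders, and the target file must already exist with
- a content length of at least 512 bytes)::
-
- data = b'\x00' * 512
- service.update_range('myshare', 'mydir', 'myfile.bin', data,
- start_range=0, end_range=511)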
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('data', data) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'range', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-write': 'update', - } - _validate_and_format_range_headers( - request, start_range, end_range) - request.body = _get_data_bytes_only('data', data) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - self._perform_request(request) - - def clear_range(self, share_name, directory_name, file_name, start_range, - end_range, timeout=None): - ''' - Clears the specified range and releases the space used in storage for - that range. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'range', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'Content-Length': '0', - 'x-ms-write': 'clear', - } - _validate_and_format_range_headers( - request, start_range, end_range) - - self._perform_request(request) - - def list_ranges(self, share_name, directory_name, file_name, - start_range=None, end_range=None, timeout=None, snapshot=None): - ''' - Retrieves the valid ranges for a file. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Specifies the start offset of bytes over which to list ranges. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - Specifies the end offset of bytes over which to list ranges. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. 
-        :returns: a list of valid ranges
-        :rtype: a list of :class:`~azure.storage.file.models.FileRange`
-        '''
-        _validate_not_none('share_name', share_name)
-        _validate_not_none('file_name', file_name)
-        request = HTTPRequest()
-        request.method = 'GET'
-        request.host_locations = self._get_host_locations()
-        request.path = _get_path(share_name, directory_name, file_name)
-        request.query = {
-            'comp': 'rangelist',
-            'timeout': _int_to_str(timeout),
-            'sharesnapshot': _to_str(snapshot),
-        }
-        if start_range is not None:
-            _validate_and_format_range_headers(
-                request,
-                start_range,
-                end_range,
-                start_range_required=False,
-                end_range_required=False)
-
-        return self._perform_request(request, _convert_xml_to_ranges)
diff --git a/azure/multiapi/storage/v2017_07_29/file/models.py b/azure/multiapi/storage/v2017_07_29/file/models.py
deleted file mode 100644
index 0811371..0000000
--- a/azure/multiapi/storage/v2017_07_29/file/models.py
+++ /dev/null
@@ -1,407 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-from ..common._common_conversion import _to_str
-
-
-class Share(object):
-    '''
-    File share class.
-
-    :ivar str name:
-        The name of the share.
-    :ivar ShareProperties properties:
-        System properties for the share.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the share as metadata.
-        This var is set to None unless the include=metadata param was included
-        for the list shares operation. If this parameter was specified but the
-        share has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    :ivar str snapshot:
-        A DateTime value that uniquely identifies the snapshot. The value of
-        this header indicates the snapshot version, and may be used in
-        subsequent requests to access the snapshot.
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None, snapshot=None):
-        self.name = name
-        self.properties = props or ShareProperties()
-        self.metadata = metadata
-        self.snapshot = snapshot
-
-
-class ShareProperties(object):
-    '''
-    File share's properties class.
-
-    :ivar datetime last_modified:
-        A datetime object representing the last time the share was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int quota:
-        Returns the current share quota in GB.
-    '''
-
-    def __init__(self):
-        self.last_modified = None
-        self.etag = None
-        self.quota = None
-
-
-class Directory(object):
-    '''
-    Directory class.
-
-    :ivar str name:
-        The name of the directory.
-    :ivar DirectoryProperties properties:
-        System properties for the directory.
-    :ivar metadata:
-        A dict containing name-value pairs associated with the directory as metadata.
-        This var is set to None unless the include=metadata param was included
-        for the list directory operation. If this parameter was specified but the
-        directory has no metadata, metadata will be set to an empty dictionary.
-    :vartype metadata: dict(str, str)
-    '''
-
-    def __init__(self, name=None, props=None, metadata=None):
-        self.name = name
-        self.properties = props or DirectoryProperties()
-        self.metadata = metadata
-
-
-class DirectoryProperties(object):
-    '''
-    File directory's properties class.
- - :ivar datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool server_encrypted: - Set to true if the directory metadata is encrypted on the server. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.server_encrypted = None - - -class File(object): - ''' - File class. - - :ivar str name: - The name of the file. - :ivar content: - File content. - :vartype content: str or bytes - :ivar FileProperties properties: - System properties for the file. - :ivar metadata: - A dict containing name-value pairs associated with the file as metadata. - This var is set to None unless the include=metadata param was included - for the list file operation. If this parameter was specified but the - file has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict(str, str) - ''' - - def __init__(self, name=None, content=None, props=None, metadata=None): - self.name = name - self.content = content - self.properties = props or FileProperties() - self.metadata = metadata - - -class FileProperties(object): - ''' - File Properties. - - :ivar datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int content_length: - The length of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar ~azure.storage.file.models.ContentSettings content_settings: - Stores all the content settings for the file. - :ivar ~azure.storage.file.models.CopyProperties copy: - Stores all the copy properties for the file. - ivar bool server_encrypted: - Set to true if the file data and application metadata are completely encrypted. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.content_length = None - self.content_range = None - self.content_settings = ContentSettings() - self.copy = CopyProperties() - self.server_encrypted = None - - -class ContentSettings(object): - ''' - Used to store the content settings of a file. - - :ivar str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If content_encoding has previously been set - for the file, that value is stored. - :ivar str content_language: - If content_language has previously been set - for the file, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :ivar str cache_control: - If cache_control has previously been set for - the file, that value is stored. - :ivar str content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. 
- ''' - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None): - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.cache_control = cache_control - self.content_md5 = content_md5 - - def _to_headers(self): - return { - 'x-ms-cache-control': _to_str(self.cache_control), - 'x-ms-content-type': _to_str(self.content_type), - 'x-ms-content-disposition': _to_str(self.content_disposition), - 'x-ms-content-md5': _to_str(self.content_md5), - 'x-ms-content-encoding': _to_str(self.content_encoding), - 'x-ms-content-language': _to_str(self.content_language), - } - - -class CopyProperties(object): - ''' - File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation using Set File Properties or - Put File. - :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation using - Set File Properties or Put File. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - ''' - - def __init__(self): - self.id = None - self.source = None - self.status = None - self.progress = None - self.completion_time = None - self.status_description = None - - -class FileRange(object): - ''' - File Range. - - :ivar int start: - Byte index for start of file range. - :ivar int end: - Byte index for end of file range. - ''' - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class DeleteSnapshot(object): - ''' - Required if the Share has associated snapshots. Specifies how to handle the snapshots. - ''' - - Include = 'include' - ''' - Delete the share and all of its snapshots. - ''' - - -class FilePermissions(object): - ''' - FilePermissions class to be used with - :func:`~azure.storage.file.fileservice.FileService.generate_file_shared_access_signature` API. - - :ivar FilePermissions FilePermissions.CREATE: - Create a new file or copy a file to a new file. 
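``ContentSettings._to_headers`` above maps each populated setting onto an ``x-ms-content-*`` request header. A minimal sketch of the same mapping as a plain function, assuming unset values are simply skipped here (the original defers that clean-up to later request handling):

.. code-block:: python

    def content_settings_headers(content_type=None, cache_control=None, content_md5=None):
        # Mirrors the header mapping in ContentSettings._to_headers above;
        # only a subset of the settings is shown.
        candidates = {
            'x-ms-content-type': content_type,
            'x-ms-cache-control': cache_control,
            'x-ms-content-md5': content_md5,
        }
        return {name: value for name, value in candidates.items() if value is not None}

    print(content_settings_headers(content_type='text/plain', cache_control='no-cache'))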
- :ivar FilePermissions FilePermissions.DELETE: - Delete the file. - :ivar FilePermissions FilePermissions.READ: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :ivar FilePermissions FilePermissions.WRITE: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - ''' - - def __init__(self, read=False, create=False, write=False, delete=False, - _str=None): - ''' - :param bool read: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :param bool create: - Create a new file or copy a file to a new file. - :param bool write: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - :param bool delete: - Delete the file. - :param str _str: - A string representing the permissions. - ''' - - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.create = create or ('c' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - - def __or__(self, other): - return FilePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return FilePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - -FilePermissions.CREATE = FilePermissions(create=True) -FilePermissions.DELETE = FilePermissions(delete=True) -FilePermissions.READ = FilePermissions(read=True) -FilePermissions.WRITE = FilePermissions(write=True) - - -class SharePermissions(object): - ''' - SharePermissions class to be used with `azure.storage.file.FileService.generate_share_shared_access_signature` - method and for the AccessPolicies used with `azure.storage.file.FileService.set_share_acl`. - - :ivar SharePermissions FilePermissions.DELETE: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :ivar SharePermissions FilePermissions.LIST: - List files and directories in the share. - :ivar SharePermissions FilePermissions.READ: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :ivar SharePermissions FilePermissions.WRITE: - For any file in the share, create or write content, properties or metadata. - Resize the file. Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. - ''' - - def __init__(self, read=False, write=False, delete=False, list=False, - _str=None): - ''' - :param bool read: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :param bool write: - For any file in the share, create or write content, properties or metadata. - Resize the file. Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. - :param bool delete: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :param bool list: - List files and directories in the share. 
- :param str _str: - A string representing the permissions - ''' - - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - - def __or__(self, other): - return SharePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return SharePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - -SharePermissions.DELETE = SharePermissions(delete=True) -SharePermissions.LIST = SharePermissions(list=True) -SharePermissions.READ = SharePermissions(read=True) -SharePermissions.WRITE = SharePermissions(write=True) diff --git a/azure/multiapi/storage/v2017_07_29/file/sharedaccesssignature.py b/azure/multiapi/storage/v2017_07_29/file/sharedaccesssignature.py deleted file mode 100644 index c9ac021..0000000 --- a/azure/multiapi/storage/v2017_07_29/file/sharedaccesssignature.py +++ /dev/null @@ -1,188 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ..common.sharedaccesssignature import ( - SharedAccessSignature, - _SharedAccessHelper, -) -from ..common._common_conversion import ( - _to_str, -) -from ._constants import X_MS_VERSION - - -class FileSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating file and share access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param FilePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
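``FilePermissions`` and ``SharePermissions`` above compose single-letter flags into the ordered permission string carried by a SAS token. A distilled, standalone sketch of that idiom (the ``Permissions`` class below is invented for illustration):

.. code-block:: python

    class Permissions(object):
        # Distilled version of the flag-composition idiom used by FilePermissions
        # and SharePermissions above; this standalone class is illustrative only.
        def __init__(self, read=False, write=False, delete=False, _str=None):
            _str = _str or ''
            self.read = read or ('r' in _str)
            self.write = write or ('w' in _str)
            self.delete = delete or ('d' in _str)

        def __or__(self, other):
            return Permissions(_str=str(self) + str(other))

        def __str__(self):
            return (('r' if self.read else '') +
                    ('w' if self.write else '') +
                    ('d' if self.delete else ''))

    READ = Permissions(read=True)
    WRITE = Permissions(write=True)

    print(str(READ | WRITE))  # 'rw'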
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _to_str(directory_name) - resource_path += '/' + _to_str(file_name) - - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param SharePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. 
- Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name) - - return sas.get_token() diff --git a/azure/multiapi/storage/v2017_07_29/queue/__init__.py b/azure/multiapi/storage/v2017_07_29/queue/__init__.py deleted file mode 100644 index 0c64f78..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
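``generate_file`` and ``generate_share`` delegate the actual token assembly and signing to ``_SharedAccessHelper``, which is outside this file. The underlying step is an HMAC-SHA256 over a canonical string-to-sign, keyed with the base64-decoded account key; a hedged sketch of just that step (the key and string-to-sign below are made up, and the real string-to-sign layout is built by the helper):

.. code-block:: python

    import base64
    import hashlib
    import hmac

    def sign_string(account_key, string_to_sign):
        # Decode the base64 account key, HMAC-SHA256 the canonical string,
        # and re-encode the digest as base64.
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    # Hypothetical inputs, for illustration only.
    fake_key = base64.b64encode(b'not-a-real-account-key').decode('utf-8')
    print(sign_string(fake_key, 'r\n2018-01-01T00:00:00Z\n/file/myaccount/myshare/file.txt'))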
-# -------------------------------------------------------------------------- -from .models import ( - Queue, - QueueMessage, - QueuePermissions, - QueueMessageFormat, -) - -from .queueservice import QueueService diff --git a/azure/multiapi/storage/v2017_07_29/queue/_constants.py b/azure/multiapi/storage/v2017_07_29/queue/_constants.py deleted file mode 100644 index 51dfcab..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/_constants.py +++ /dev/null @@ -1,11 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -__author__ = 'Microsoft Corp. ' -__version__ = '1.1.0' - -# x-ms-version for storage service. -X_MS_VERSION = '2017-07-29' diff --git a/azure/multiapi/storage/v2017_07_29/queue/_deserialization.py b/azure/multiapi/storage/v2017_07_29/queue/_deserialization.py deleted file mode 100644 index d0ef297..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/_deserialization.py +++ /dev/null @@ -1,150 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from dateutil import parser - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from .models import ( - Queue, - QueueMessage, -) -from ..common.models import ( - _list, -) -from ..common._deserialization import ( - _to_int, - _parse_metadata, -) -from ._encryption import ( - _decrypt_queue_message, -) - - -def _parse_metadata_and_message_count(response): - ''' - Extracts approximate messages count header. - ''' - metadata = _parse_metadata(response) - metadata.approximate_message_count = _to_int(response.headers.get('x-ms-approximate-messages-count')) - - return metadata - - -def _parse_queue_message_from_headers(response): - ''' - Extracts pop receipt and time next visible from headers. 
- ''' - message = QueueMessage() - message.pop_receipt = response.headers.get('x-ms-popreceipt') - message.time_next_visible = parser.parse(response.headers.get('x-ms-time-next-visible')) - - return message - - -def _convert_xml_to_queues(response): - ''' - - - string-value - string-value - int-value - - - string-value - - value - - - - - ''' - if response is None or response.body is None: - return None - - queues = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(queues, 'next_marker', next_marker) - - queues_element = list_element.find('Queues') - - for queue_element in queues_element.findall('Queue'): - # Name element - queue = Queue() - queue.name = queue_element.findtext('Name') - - # Metadata - metadata_root_element = queue_element.find('Metadata') - if metadata_root_element is not None: - queue.metadata = dict() - for metadata_element in metadata_root_element: - queue.metadata[metadata_element.tag] = metadata_element.text - - # Add queue to list - queues.append(queue) - - return queues - - -def _convert_xml_to_queue_messages(response, decode_function, require_encryption, key_encryption_key, resolver, - content=None): - ''' - - - - string-message-id - insertion-time - expiration-time - opaque-string-receipt-data - time-next-visible - integer - message-body - - - ''' - if response is None or response.body is None: - return None - - messages = list() - list_element = ETree.fromstring(response.body) - - for message_element in list_element.findall('QueueMessage'): - message = QueueMessage() - - message.id = message_element.findtext('MessageId') - - dequeue_count = message_element.findtext('DequeueCount') - if dequeue_count is not None: - message.dequeue_count = _to_int(dequeue_count) - - # content is not returned for put_message - if content is not None: - message.content = content - else: - message.content = message_element.findtext('MessageText') - if (key_encryption_key is not None) or (resolver is not None): - message.content = _decrypt_queue_message(message.content, require_encryption, - key_encryption_key, resolver) - message.content = decode_function(message.content) - - message.insertion_time = parser.parse(message_element.findtext('InsertionTime')) - message.expiration_time = parser.parse(message_element.findtext('ExpirationTime')) - - message.pop_receipt = message_element.findtext('PopReceipt') - - time_next_visible = message_element.find('TimeNextVisible') - if time_next_visible is not None: - message.time_next_visible = parser.parse(time_next_visible.text) - - # Add message to list - messages.append(message) - - return messages diff --git a/azure/multiapi/storage/v2017_07_29/queue/_encryption.py b/azure/multiapi/storage/v2017_07_29/queue/_encryption.py deleted file mode 100644 index 75979f3..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/_encryption.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
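The docstrings of ``_convert_xml_to_queues`` and ``_convert_xml_to_queue_messages`` originally carried sample XML payloads whose tags have been lost; the element names can still be read off the parsing code. A small runnable sketch that parses a message list the same way (sample values invented):

.. code-block:: python

    from xml.etree import ElementTree as ETree

    # Element names follow the fields read by _convert_xml_to_queue_messages above.
    sample = (
        '<QueueMessagesList>'
        '<QueueMessage>'
        '<MessageId>11111111-2222-3333-4444-555555555555</MessageId>'
        '<DequeueCount>1</DequeueCount>'
        '<PopReceipt>AgAAAAMAAAAAAAAA</PopReceipt>'
        '<MessageText>hello</MessageText>'
        '</QueueMessage>'
        '</QueueMessagesList>'
    )

    root = ETree.fromstring(sample)
    for element in root.findall('QueueMessage'):
        print(element.findtext('MessageId'),
              element.findtext('DequeueCount'),
              element.findtext('MessageText'))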
-# -------------------------------------------------------------------------- - -import os -from json import ( - dumps, - loads, -) - -from azure.common import ( - AzureException, -) -from cryptography.hazmat.primitives.padding import PKCS7 - -from ..common._common_conversion import ( - _encode_base64, - _decode_base64_to_bytes -) -from ..common._encryption import ( - _generate_encryption_data_dict, - _dict_to_encryption_data, - _generate_AES_CBC_cipher, - _validate_and_unwrap_cek, - _EncryptionAlgorithm, -) -from ..common._error import ( - _ERROR_DECRYPTION_FAILURE, - _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM, - _validate_not_none, - _validate_key_encryption_key_wrap, -) -from ._error import ( - _ERROR_MESSAGE_NOT_ENCRYPTED -) - - -def _encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': _encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. 
- :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED) - else: - return message - try: - return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - - -def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if not (_EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data diff --git a/azure/multiapi/storage/v2017_07_29/queue/_error.py b/azure/multiapi/storage/v2017_07_29/queue/_error.py deleted file mode 100644 index cb15935..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/_error.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys - -from ..common._error import ( - _validate_type_bytes, -) - -_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.' -_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.' -_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.' -_ERROR_MESSAGE_NOT_ENCRYPTED = 'Message was not encrypted.' 
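``_encrypt_queue_message`` and ``_decrypt`` above use AES-256 in CBC mode with PKCS7 (128-bit block) padding, a random 32-byte content-encryption key, and a 16-byte IV. A self-contained round trip of that cipher configuration built directly with the ``cryptography`` primitives, assuming key wrapping and the ``EncryptionData`` envelope are handled elsewhere:

.. code-block:: python

    import os

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
    from cryptography.hazmat.primitives.padding import PKCS7

    content_encryption_key = os.urandom(32)   # AES-256 key
    initialization_vector = os.urandom(16)    # one AES block

    def encrypt(message):
        padder = PKCS7(128).padder()
        padded = padder.update(message.encode('utf-8')) + padder.finalize()
        cipher = Cipher(algorithms.AES(content_encryption_key),
                        modes.CBC(initialization_vector), backend=default_backend())
        encryptor = cipher.encryptor()
        return encryptor.update(padded) + encryptor.finalize()

    def decrypt(ciphertext):
        cipher = Cipher(algorithms.AES(content_encryption_key),
                        modes.CBC(initialization_vector), backend=default_backend())
        decryptor = cipher.decryptor()
        padded = decryptor.update(ciphertext) + decryptor.finalize()
        unpadder = PKCS7(128).unpadder()
        return (unpadder.update(padded) + unpadder.finalize()).decode('utf-8')

    assert decrypt(encrypt('queue message body')) == 'queue message body'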
- -def _validate_message_type_text(param): - if sys.version_info < (3,): - if not isinstance(param, unicode): - raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE) - else: - if not isinstance(param, str): - raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR) - - -def _validate_message_type_bytes(param): - _validate_type_bytes('message', param) diff --git a/azure/multiapi/storage/v2017_07_29/queue/_serialization.py b/azure/multiapi/storage/v2017_07_29/queue/_serialization.py deleted file mode 100644 index 21569e5..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/_serialization.py +++ /dev/null @@ -1,73 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys - -if sys.version_info >= (3,): - from io import BytesIO -else: - try: - from cStringIO import StringIO as BytesIO - except: - from StringIO import StringIO as BytesIO - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from ..common._common_conversion import ( - _str, -) -from ._encryption import ( - _encrypt_queue_message, -) - - -def _get_path(queue_name=None, include_messages=None, message_id=None): - ''' - Creates the path to access a queue resource. - - queue_name: - Name of queue. - include_messages: - Whether or not to include messages. - message_id: - Message id. - ''' - if queue_name and include_messages and message_id: - return '/{0}/messages/{1}'.format(_str(queue_name), message_id) - if queue_name and include_messages: - return '/{0}/messages'.format(_str(queue_name)) - elif queue_name: - return '/{0}'.format(_str(queue_name)) - else: - return '/' - - -def _convert_queue_message_xml(message_text, encode_function, key_encryption_key): - ''' - - - - - ''' - queue_message_element = ETree.Element('QueueMessage') - - # Enabled - message_text = encode_function(message_text) - if key_encryption_key is not None: - message_text = _encrypt_queue_message(message_text, key_encryption_key) - ETree.SubElement(queue_message_element, 'MessageText').text = message_text - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - output = stream.getvalue() - finally: - stream.close() - - return output diff --git a/azure/multiapi/storage/v2017_07_29/queue/models.py b/azure/multiapi/storage/v2017_07_29/queue/models.py deleted file mode 100644 index fb3932a..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/models.py +++ /dev/null @@ -1,239 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from base64 import ( - b64encode, - b64decode, -) -from xml.sax.saxutils import escape as xml_escape -from xml.sax.saxutils import unescape as xml_unescape - -from ._error import ( - _validate_message_type_bytes, - _validate_message_type_text, - _ERROR_MESSAGE_NOT_BASE64, -) - - -class Queue(object): - ''' - Queue class. - - :ivar str name: - The name of the queue. 
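``_convert_queue_message_xml`` above wraps the already encoded (and optionally encrypted) message text in a ``QueueMessage``/``MessageText`` element pair and serializes it with an XML declaration. A minimal sketch of that serialization step:

.. code-block:: python

    from io import BytesIO
    from xml.etree import ElementTree as ETree

    def queue_message_body(message_text):
        # Builds the same element layout written by _convert_queue_message_xml;
        # encoding/encryption of message_text is assumed to have happened already.
        root = ETree.Element('QueueMessage')
        ETree.SubElement(root, 'MessageText').text = message_text
        stream = BytesIO()
        try:
            ETree.ElementTree(root).write(stream, xml_declaration=True,
                                          encoding='utf-8', method='xml')
            return stream.getvalue()
        finally:
            stream.close()

    print(queue_message_body('hello'))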
- :ivar metadata: - A dict containing name-value pairs associated with the queue as metadata. - This var is set to None unless the include=metadata param was included - for the list queues operation. If this parameter was specified but the - queue has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict(str, str) - ''' - - def __init__(self): - self.name = None - self.metadata = None - - -class QueueMessage(object): - ''' - Queue message class. - - :ivar str id: - A GUID value assigned to the message by the Queue service that - identifies the message in the queue. This value may be used together - with the value of pop_receipt to delete a message from the queue after - it has been retrieved with the get messages operation. - :ivar date insertion_time: - A UTC date value representing the time the messages was inserted. - :ivar date expiration_time: - A UTC date value representing the time the message expires. - :ivar int dequeue_count: - Begins with a value of 1 the first time the message is dequeued. This - value is incremented each time the message is subsequently dequeued. - :ivar obj content: - The message content. Type is determined by the decode_function set on - the service. Default is str. - :ivar str pop_receipt: - A receipt str which can be used together with the message_id element to - delete a message from the queue after it has been retrieved with the get - messages operation. Only returned by get messages operations. Set to - None for peek messages. - :ivar date time_next_visible: - A UTC date value representing the time the message will next be visible. - Only returned by get messages operations. Set to None for peek messages. - ''' - - def __init__(self): - self.id = None - self.insertion_time = None - self.expiration_time = None - self.dequeue_count = None - self.content = None - self.pop_receipt = None - self.time_next_visible = None - - -class QueueMessageFormat: - ''' - Encoding and decoding methods which can be used to modify how the queue service - encodes and decodes queue messages. Set these to queueservice.encode_function - and queueservice.decode_function to modify the behavior. The defaults are - text_xmlencode and text_xmldecode, respectively. - ''' - - @staticmethod - def text_base64encode(data): - ''' - Base64 encode unicode text. - - :param str data: String to encode. - :return: Base64 encoded string. - :rtype: str - ''' - _validate_message_type_text(data) - return b64encode(data.encode('utf-8')).decode('utf-8') - - @staticmethod - def text_base64decode(data): - ''' - Base64 decode to unicode text. - - :param str data: String data to decode to unicode. - :return: Base64 decoded string. - :rtype: str - ''' - try: - return b64decode(data.encode('utf-8')).decode('utf-8') - except (ValueError, TypeError): - # ValueError for Python 3, TypeError for Python 2 - raise ValueError(_ERROR_MESSAGE_NOT_BASE64) - - @staticmethod - def binary_base64encode(data): - ''' - Base64 encode byte strings. - - :param str data: Binary string to encode. - :return: Base64 encoded data. - :rtype: str - ''' - _validate_message_type_bytes(data) - return b64encode(data).decode('utf-8') - - @staticmethod - def binary_base64decode(data): - ''' - Base64 decode to byte string. - - :param str data: Data to decode to a byte string. - :return: Base64 decoded data. 
- :rtype: str - ''' - try: - return b64decode(data.encode('utf-8')) - except (ValueError, TypeError): - # ValueError for Python 3, TypeError for Python 2 - raise ValueError(_ERROR_MESSAGE_NOT_BASE64) - - @staticmethod - def text_xmlencode(data): - ''' - XML encode unicode text. - - :param str data: Unicode string to encode - :return: XML encoded data. - :rtype: str - ''' - _validate_message_type_text(data) - return xml_escape(data) - - @staticmethod - def text_xmldecode(data): - ''' - XML decode to unicode text. - - :param str data: Data to decode to unicode. - :return: XML decoded data. - :rtype: str - ''' - return xml_unescape(data) - - @staticmethod - def noencode(data): - ''' - Do no encoding. - - :param str data: Data. - :return: The data passed in is returned unmodified. - :rtype: str - ''' - return data - - @staticmethod - def nodecode(data): - ''' - Do no decoding. - - :param str data: Data. - :return: The data passed in is returned unmodified. - :rtype: str - ''' - return data - - -class QueuePermissions(object): - ''' - QueuePermissions class to be used with :func:`~azure.storage.queue.queueservice.QueueService.generate_queue_shared_access_signature` - method and for the AccessPolicies used with :func:`~azure.storage.queue.queueservice.QueueService.set_queue_acl`. - - :ivar QueuePermissions QueuePermissions.READ: - Read metadata and properties, including message count. Peek at messages. - :ivar QueuePermissions QueuePermissions.ADD: - Add messages to the queue. - :ivar QueuePermissions QueuePermissions.UPDATE: - Update messages in the queue. Note: Use the Process permission with - Update so you can first get the message you want to update. - :ivar QueuePermissions QueuePermissions.PROCESS: Delete entities. - Get and delete messages from the queue. - ''' - - def __init__(self, read=False, add=False, update=False, process=False, _str=None): - ''' - :param bool read: - Read metadata and properties, including message count. Peek at messages. - :param bool add: - Add messages to the queue. - :param bool update: - Update messages in the queue. Note: Use the Process permission with - Update so you can first get the message you want to update. - :param bool process: - Get and delete messages from the queue. - :param str _str: - A string representing the permissions. - ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.add = add or ('a' in _str) - self.update = update or ('u' in _str) - self.process = process or ('p' in _str) - - def __or__(self, other): - return QueuePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return QueuePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('a' if self.add else '') + - ('u' if self.update else '') + - ('p' if self.process else '')) - - -QueuePermissions.READ = QueuePermissions(read=True) -QueuePermissions.ADD = QueuePermissions(add=True) -QueuePermissions.UPDATE = QueuePermissions(update=True) -QueuePermissions.PROCESS = QueuePermissions(process=True) diff --git a/azure/multiapi/storage/v2017_07_29/queue/queueservice.py b/azure/multiapi/storage/v2017_07_29/queue/queueservice.py deleted file mode 100644 index c2af11b..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/queueservice.py +++ /dev/null @@ -1,988 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
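``QueueMessageFormat`` pairs encode and decode functions: XML escaping by default, with base64 variants for interoperability with other Azure Storage libraries. A quick standalone round trip of the same transformations:

.. code-block:: python

    import base64
    from xml.sax.saxutils import escape, unescape

    def text_base64encode(data):
        # Same transformation as QueueMessageFormat.text_base64encode above.
        return base64.b64encode(data.encode('utf-8')).decode('utf-8')

    def text_base64decode(data):
        return base64.b64decode(data.encode('utf-8')).decode('utf-8')

    original = 'price < 100 & quantity > 5'
    assert text_base64decode(text_base64encode(original)) == original

    # The default scheme simply XML-escapes the text instead:
    print(escape(original))                      # price &lt; 100 &amp; quantity &gt; 5
    print(unescape(escape(original)) == original)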
-# -------------------------------------------------------------------------- -from azure.common import ( - AzureConflictHttpError, - AzureHttpError, -) - -from ..common._auth import ( - _StorageSASAuthentication, - _StorageSharedKeyAuthentication, -) -from ..common._common_conversion import ( - _int_to_str, - _to_str, -) -from ..common._connection import _ServiceParameters -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._deserialization import ( - _convert_xml_to_service_properties, - _convert_xml_to_signed_identifiers, - _convert_xml_to_service_stats, -) -from ..common._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _ERROR_CONFLICT, - _ERROR_STORAGE_MISSING_INFO, - _validate_access_policies, - _validate_encryption_required, - _validate_decryption_required, -) -from ..common._http import ( - HTTPRequest, -) -from ..common._serialization import ( - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, -) -from ..common._serialization import ( - _get_request_body, - _add_metadata_headers, -) -from ..common.models import ( - Services, - ListGenerator, - _OperationContext, -) -from .sharedaccesssignature import ( - QueueSharedAccessSignature, -) -from ..common.storageclient import StorageClient -from ._deserialization import ( - _convert_xml_to_queues, - _convert_xml_to_queue_messages, - _parse_queue_message_from_headers, - _parse_metadata_and_message_count, -) -from ._serialization import ( - _convert_queue_message_xml, - _get_path, -) -from .models import ( - QueueMessageFormat, -) -from ._constants import ( - X_MS_VERSION, - __version__ as package_version, -) - -_HTTP_RESPONSE_NO_CONTENT = 204 - - -class QueueService(StorageClient): - ''' - This is the main class managing queue resources. - - The Queue service stores messages. A queue can contain an unlimited number of - messages, each of which can be up to 64KB in size. Messages are generally added - to the end of the queue and retrieved from the front of the queue, although - first in, first out (FIFO) behavior is not guaranteed. - - :ivar function(data) encode_function: - A function used to encode queue messages. Takes as - a parameter the data passed to the put_message API and returns the encoded - message. Defaults to take text and xml encode, but bytes and other - encodings can be used. For example, base64 may be preferable for developing - across multiple Azure Storage libraries in different languages. See the - :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 and - no encoding methods as well as binary equivalents. - :ivar function(data) decode_function: - A function used to encode decode messages. Takes as - a parameter the data returned by the get_messages and peek_messages APIs and - returns the decoded message. Defaults to return text and xml decode, but - bytes and other decodings can be used. For example, base64 may be preferable - for developing across multiple Azure Storage libraries in different languages. - See the :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 - and no decoding methods as well as binary equivalents. - :ivar object key_encryption_key: - The key-encryption-key optionally provided by the user. If provided, will be used to - encrypt/decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR the resolver must be provided. - If both are provided, the resolver will take precedence. 
- Must implement the following methods for APIs requiring encryption: - wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - Must implement the following methods for APIs requiring decryption: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :ivar function key_resolver_function(kid): - A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR - the resolver must be provided. If both are provided, the resolver will take precedence. - It uses the kid string to return a key-encryption-key implementing the interface defined above. - :ivar bool require_encryption: - A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and - successfully read from the queue are/were encrypted while on the server. If this flag is set, all required - parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver. - ''' - - def __init__(self, account_name=None, account_key=None, sas_token=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. 
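The ``key_encryption_key`` docstring above spells out the interface a user-supplied key wrapper must expose (``wrap_key``, ``unwrap_key``, ``get_key_wrap_algorithm``, ``get_kid``). Below is a toy, intentionally insecure object satisfying that interface; the class name and XOR "algorithm" are invented, and a real implementation would wrap the key with an RSA key or Azure Key Vault. An object like this could then be assigned to ``QueueService.key_encryption_key``.

.. code-block:: python

    class ToyKeyWrapper(object):
        # Implements the documented KEK interface with a trivial XOR "wrap".
        # For illustration only; this provides no real protection.
        def __init__(self, kid, secret):
            self._kid = kid
            self._secret = secret

        def wrap_key(self, key):
            return bytes(b ^ self._secret[i % len(self._secret)]
                         for i, b in enumerate(key))

        def unwrap_key(self, wrapped_key, algorithm):
            return self.wrap_key(wrapped_key)  # XOR is its own inverse

        def get_key_wrap_algorithm(self):
            return 'XOR'  # invented label

        def get_kid(self):
            return self._kid

    kek = ToyKeyWrapper('local:toy-key-1', b'not-a-real-secret')
    cek = b'0' * 32
    assert kek.unwrap_key(kek.wrap_key(cek), kek.get_key_wrap_algorithm()) == cek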
- ''' - service_params = _ServiceParameters.get_service_parameters( - 'queue', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(QueueService, self).__init__(service_params) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - self.is_emulated - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - self.encode_function = QueueMessageFormat.text_xmlencode - self.decode_function = QueueMessageFormat.text_xmldecode - self.key_encryption_key = None - self.key_resolver_function = None - self.require_encryption = False - self._X_MS_VERSION = X_MS_VERSION - self._update_user_agent_string(package_version) - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the queue service. - Use the returned signature with the sas_token parameter of QueueService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = QueueSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.QUEUE, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_queue_shared_access_signature(self, queue_name, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, protocol=None, ): - ''' - Generates a shared access signature for the queue. - Use the returned signature with the sas_token parameter of QueueService. - - :param str queue_name: - The name of the queue to create a SAS token for. - :param QueuePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_queue_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('queue_name', queue_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = QueueSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_queue( - queue_name, - permission=permission, - expiry=expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - ) - - def get_queue_service_stats(self, timeout=None): - ''' - Retrieves statistics related to replication for the Queue service. It is - only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. 
The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The queue service stats. - :rtype: :class:`~azure.storage.common.models.ServiceStats` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(primary=False, secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_stats) - - def get_queue_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's Queue service, including - logging, analytics and CORS rules. - - :param int timeout: - The server timeout, expressed in seconds. - :return: The queue service properties. - :rtype: :class:`~azure.storage.common.models.ServiceProperties` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def set_queue_service_properties(self, logging=None, hour_metrics=None, - minute_metrics=None, cors=None, timeout=None): - ''' - Sets the properties of a storage account's Queue service, including - Azure Storage Analytics. If an element (ex Logging) is left as None, the - existing settings on the service for that functionality are preserved. - For more information on Azure Storage Analytics, see - https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx. - - :param Logging logging: - The logging settings provide request logs. - :param Metrics hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for queuess. - :param Metrics minute_metrics: - The minute metrics settings provide request statistics - for each minute for queues. - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. For detailed information - about CORS rules and evaluation logic, see - https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx. - :type cors: list(:class:`~azure.storage.common.models.CorsRule`) - :param int timeout: - The server timeout, expressed in seconds. 
- ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors)) - self._perform_request(request) - - def list_queues(self, prefix=None, num_results=None, include_metadata=False, - marker=None, timeout=None): - ''' - Returns a generator to list the queues. The generator will lazily follow - the continuation tokens returned by the service and stop when all queues - have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - queues, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only queues with names that begin - with the specified prefix. - :param int num_results: - The maximum number of queues to return. - :param bool include_metadata: - Specifies that container metadata be returned in the response. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The server timeout, expressed in seconds. This function may make multiple - calls to the service in which case the timeout value specified will be - applied to each individual call. - ''' - include = 'metadata' if include_metadata else None - operation_context = _OperationContext(location_lock=True) - kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include, - 'marker': marker, 'timeout': timeout, '_context': operation_context} - resp = self._list_queues(**kwargs) - - return ListGenerator(resp, self._list_queues, (), kwargs) - - def _list_queues(self, prefix=None, marker=None, max_results=None, - include=None, timeout=None, _context=None): - ''' - Returns a list of queues under the specified account. Makes a single list - request to the service. Used internally by the list_queues method. - - :param str prefix: - Filters the results to return only queues with names that begin - with the specified prefix. - :param str marker: - A token which identifies the portion of the query to be - returned with the next query operation. The operation returns a - next_marker element within the response body if the list returned - was not complete. This value may then be used as a query parameter - in a subsequent call to request the next portion of the list of - queues. The marker value is opaque to the client. - :param int max_results: - The maximum number of queues to return. A single list request may - return up to 1000 queues and potentially a continuation token which - should be followed to get additional resutls. - :param str include: - Include this parameter to specify that the container's - metadata be returned as part of the response body. - :param int timeout: - The server timeout, expressed in seconds. 
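# Illustrative sketch (not from the original sources): enabling logging and hourly
# metrics through set_queue_service_properties as documented above. Logging,
# Metrics and RetentionPolicy are assumed to come from the package's common models,
# and `service` is a QueueService instance as in the earlier sketch.
retention = RetentionPolicy(enabled=True, days=7)
service.set_queue_service_properties(
    logging=Logging(delete=True, read=True, write=True, retention_policy=retention),
    hour_metrics=Metrics(enabled=True, include_apis=True, retention_policy=retention),
)
# list_queues lazily follows continuation tokens, so simple iteration is enough:
for queue in service.list_queues(prefix='task', include_metadata=True):
    print(queue.name, queue.metadata)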
- ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queues, operation_context=_context) - - def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None): - ''' - Creates a queue under the given account. - - :param str queue_name: - The name of the queue to create. A queue name must be from 3 through - 63 characters long and may only contain lowercase letters, numbers, - and the dash (-) character. The first and last letters in the queue - must be alphanumeric. The dash (-) character cannot be the first or - last character. Consecutive dash characters are not permitted in the - queue name. - :param metadata: - A dict containing name-value pairs to associate with the queue as - metadata. Note that metadata names preserve the case with which they - were created, but are case-insensitive when set or read. - :type metadata: dict(str, str) - :param bool fail_on_exist: - Specifies whether to throw an exception if the queue already exists. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A boolean indicating whether the queue was created. If fail_on_exist - was set to True, this will throw instead of returning false. - :rtype: bool - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = {'timeout': _int_to_str(timeout)} - _add_metadata_headers(metadata, request) - - def _return_request(request): - return request - - if not fail_on_exist: - try: - response = self._perform_request(request, parser=_return_request) - if response.status == _HTTP_RESPONSE_NO_CONTENT: - return False - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - response = self._perform_request(request, parser=_return_request) - if response.status == _HTTP_RESPONSE_NO_CONTENT: - raise AzureConflictHttpError( - _ERROR_CONFLICT.format(response.message), response.status) - return True - - def delete_queue(self, queue_name, fail_not_exist=False, timeout=None): - ''' - Deletes the specified queue and any messages it contains. - - When a queue is successfully deleted, it is immediately marked for deletion - and is no longer accessible to clients. The queue is later removed from - the Queue service during garbage collection. - - Note that deleting a queue is likely to take at least 40 seconds to complete. - If an operation is attempted against the queue while it was being deleted, - an :class:`AzureConflictHttpError` will be thrown. - - :param str queue_name: - The name of the queue to delete. - :param bool fail_not_exist: - Specifies whether to throw an exception if the queue doesn't exist. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A boolean indicating whether the queue was deleted. If fail_not_exist - was set to True, this will throw instead of returning false. 
- :rtype: bool - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = {'timeout': _int_to_str(timeout)} - if not fail_not_exist: - try: - self._perform_request(request) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_queue_metadata(self, queue_name, timeout=None): - ''' - Retrieves user-defined metadata and queue properties on the specified - queue. Metadata is associated with the queue as name-value pairs. - - :param str queue_name: - The name of an existing queue. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A dictionary representing the queue metadata with an - approximate_message_count int property on the dict estimating the - number of messages in the queue. - :rtype: dict(str, str) - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_metadata_and_message_count) - - def set_queue_metadata(self, queue_name, metadata=None, timeout=None): - ''' - Sets user-defined metadata on the specified queue. Metadata is - associated with the queue as name-value pairs. - - :param str queue_name: - The name of an existing queue. - :param dict metadata: - A dict containing name-value pairs to associate with the - queue as metadata. - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def exists(self, queue_name, timeout=None): - ''' - Returns a boolean indicating whether the queue exists. - - :param str queue_name: - The name of queue to check for existence. - :param int timeout: - The server timeout, expressed in seconds. - :return: A boolean indicating whether the queue exists. - :rtype: bool - ''' - try: - self.get_queue_metadata(queue_name, timeout=timeout) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def get_queue_acl(self, queue_name, timeout=None): - ''' - Returns details about any stored access policies specified on the - queue that may be used with Shared Access Signatures. - - :param str queue_name: - The name of an existing queue. - :param int timeout: - The server timeout, expressed in seconds. - :return: A dictionary of access policies associated with the queue. 
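# Illustrative sketch (not from the original sources): the queue lifecycle helpers
# shown above. The queue name and metadata values are placeholders; `service` is a
# QueueService instance as in the earlier sketch.
if not service.exists('tasks'):
    service.create_queue('tasks', metadata={'department': 'billing'})

props = service.get_queue_metadata('tasks')
# The returned dict also exposes an approximate_message_count attribute.
print(props.approximate_message_count)

service.set_queue_metadata('tasks', metadata={'department': 'billing', 'env': 'prod'})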
- :rtype: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name) - request.query = { - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_signed_identifiers) - - def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None): - ''' - Sets stored access policies for the queue that may be used with Shared - Access Signatures. - - When you set permissions for a queue, the existing permissions are replaced. - To update the queue's permissions, call :func:`~get_queue_acl` to fetch - all access policies associated with the queue, modify the access policy - that you wish to change, and then call this function with the complete - set of data to perform the update. - - When you establish a stored access policy on a queue, it may take up to - 30 seconds to take effect. During this interval, a shared access signature - that is associated with the stored access policy will throw an - :class:`AzureHttpError` until the access policy becomes active. - - :param str queue_name: - The name of an existing queue. - :param signed_identifiers: - A dictionary of access policies to associate with the queue. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = { - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - self._perform_request(request) - - def put_message(self, queue_name, content, visibility_timeout=None, - time_to_live=None, timeout=None): - ''' - Adds a new message to the back of the message queue. - - The visibility timeout specifies the time that the message will be - invisible. After the timeout expires, the message will become visible. - If a visibility timeout is not specified, the default value of 0 is used. - - The message time-to-live specifies how long a message will remain in the - queue. The message will be deleted from the queue when the time-to-live - period expires. - - If the key-encryption-key field is set on the local service object, this method will - encrypt the content before uploading. - - :param str queue_name: - The name of the queue to put the message into. - :param obj content: - Message content. Allowed type is determined by the encode_function - set on the service. Default is str. The encoded message can be up to - 64KB in size. - :param int visibility_timeout: - If not specified, the default value is 0. Specifies the - new visibility timeout value, in seconds, relative to server time. - The value must be larger than or equal to 0, and cannot be - larger than 7 days. The visibility timeout of a message cannot be - set to a value later than the expiry time. visibility_timeout - should be set to a value smaller than the time-to-live value. 
- :param int time_to_live: - Specifies the time-to-live interval for the message, in - seconds. The time-to-live may be any positive number or -1 for infinity. If this - parameter is omitted, the default time-to-live is 7 days. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A :class:`~azure.storage.queue.models.QueueMessage` object. - This object is also populated with the content although it is not - returned from the service. - :rtype: :class:`~azure.storage.queue.models.QueueMessage` - ''' - - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - _validate_not_none('queue_name', queue_name) - _validate_not_none('content', content) - request = HTTPRequest() - request.method = 'POST' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = { - 'visibilitytimeout': _to_str(visibility_timeout), - 'messagettl': _to_str(time_to_live), - 'timeout': _int_to_str(timeout) - } - - request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function, - self.key_encryption_key)) - - message_list = self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, False, - None, None, content]) - return message_list[0] - - def get_messages(self, queue_name, num_messages=None, - visibility_timeout=None, timeout=None): - ''' - Retrieves one or more messages from the front of the queue. - - When a message is retrieved from the queue, the response includes the message - content and a pop_receipt value, which is required to delete the message. - The message is not automatically deleted from the queue, but after it has - been retrieved, it is not visible to other clients for the time interval - specified by the visibility_timeout parameter. - - If the key-encryption-key or resolver field is set on the local service object, the messages will be - decrypted before being returned. - - :param str queue_name: - The name of the queue to get messages from. - :param int num_messages: - A nonzero integer value that specifies the number of - messages to retrieve from the queue, up to a maximum of 32. If - fewer are visible, the visible messages are returned. By default, - a single message is retrieved from the queue with this operation. - :param int visibility_timeout: - Specifies the new visibility timeout value, in seconds, relative - to server time. The new value must be larger than or equal to 1 - second, and cannot be larger than 7 days. The visibility timeout of - a message can be set to a value later than the expiry time. - :param int timeout: - The server timeout, expressed in seconds. - :return: A :class:`~azure.storage.queue.models.QueueMessage` object representing the information passed. 
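# Illustrative sketch (not from the original sources): enqueuing a message with
# put_message as documented above. The content is run through the service's
# encode_function (text_xmlencode by default, as set in the constructor), so a
# plain unicode string is fine; the time-to-live here is one hour.
msg = service.put_message('tasks', u'process-order-42', time_to_live=3600)
print(msg.id, msg.insertion_time)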
- :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`) - ''' - _validate_decryption_required(self.require_encryption, self.key_encryption_key, - self.key_resolver_function) - - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = { - 'numofmessages': _to_str(num_messages), - 'visibilitytimeout': _to_str(visibility_timeout), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, self.require_encryption, - self.key_encryption_key, self.key_resolver_function]) - - def peek_messages(self, queue_name, num_messages=None, timeout=None): - ''' - Retrieves one or more messages from the front of the queue, but does - not alter the visibility of the message. - - Only messages that are visible may be retrieved. When a message is retrieved - for the first time with a call to get_messages, its dequeue_count property - is set to 1. If it is not deleted and is subsequently retrieved again, the - dequeue_count property is incremented. The client may use this value to - determine how many times a message has been retrieved. Note that a call - to peek_messages does not increment the value of DequeueCount, but returns - this value for the client to read. - - If the key-encryption-key or resolver field is set on the local service object, the messages will be - decrypted before being returned. - - :param str queue_name: - The name of the queue to peek messages from. - :param int num_messages: - A nonzero integer value that specifies the number of - messages to peek from the queue, up to a maximum of 32. By default, - a single message is peeked from the queue with this operation. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that - time_next_visible and pop_receipt will not be populated as peek does - not pop the message and can only retrieve already visible messages. - :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`) - ''' - - _validate_decryption_required(self.require_encryption, self.key_encryption_key, - self.key_resolver_function) - - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name, True) - request.query = { - 'peekonly': 'true', - 'numofmessages': _to_str(num_messages), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, self.require_encryption, - self.key_encryption_key, self.key_resolver_function]) - - def delete_message(self, queue_name, message_id, pop_receipt, timeout=None): - ''' - Deletes the specified message. - - Normally after a client retrieves a message with the get_messages operation, - the client is expected to process and delete the message. To delete the - message, you must have two items of data: id and pop_receipt. The - id is returned from the previous get_messages operation. The - pop_receipt is returned from the most recent :func:`~get_messages` or - :func:`~update_message` operation. 
In order for the delete_message operation - to succeed, the pop_receipt specified on the request must match the - pop_receipt returned from the :func:`~get_messages` or :func:`~update_message` - operation. - - :param str queue_name: - The name of the queue from which to delete the message. - :param str message_id: - The message id identifying the message to delete. - :param str pop_receipt: - A valid pop receipt value returned from an earlier call - to the :func:`~get_messages` or :func:`~update_message`. - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - _validate_not_none('message_id', message_id) - _validate_not_none('pop_receipt', pop_receipt) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True, message_id) - request.query = { - 'popreceipt': _to_str(pop_receipt), - 'timeout': _int_to_str(timeout) - } - self._perform_request(request) - - def clear_messages(self, queue_name, timeout=None): - ''' - Deletes all messages from the specified queue. - - :param str queue_name: - The name of the queue whose messages to clear. - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = {'timeout': _int_to_str(timeout)} - self._perform_request(request) - - def update_message(self, queue_name, message_id, pop_receipt, visibility_timeout, - content=None, timeout=None): - ''' - Updates the visibility timeout of a message. You can also use this - operation to update the contents of a message. - - This operation can be used to continually extend the invisibility of a - queue message. This functionality can be useful if you want a worker role - to "lease" a queue message. For example, if a worker role calls get_messages - and recognizes that it needs more time to process a message, it can - continually extend the message's invisibility until it is processed. If - the worker role were to fail during processing, eventually the message - would become visible again and another worker role could process it. - - If the key-encryption-key field is set on the local service object, this method will - encrypt the content before uploading. - - :param str queue_name: - The name of the queue containing the message to update. - :param str message_id: - The message id identifying the message to update. - :param str pop_receipt: - A valid pop receipt value returned from an earlier call - to the :func:`~get_messages` or :func:`~update_message` operation. - :param int visibility_timeout: - Specifies the new visibility timeout value, in seconds, - relative to server time. The new value must be larger than or equal - to 0, and cannot be larger than 7 days. The visibility timeout of a - message cannot be set to a value later than the expiry time. A - message can be updated until it has been deleted or has expired. - :param obj content: - Message content. Allowed type is determined by the encode_function - set on the service. Default is str. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A list of :class:`~azure.storage.queue.models.QueueMessage` objects. For convenience, - this object is also populated with the content, although it is not returned by the service. 
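# Illustrative sketch (not from the original sources): the receive / extend /
# delete flow described above. It assumes update_message returns the refreshed
# message with its new pop receipt (parsed from the response headers), and that
# the delete must use the most recently returned pop receipt.
messages = service.get_messages('tasks', num_messages=1, visibility_timeout=30)
for msg in messages:
    # ...do some work, then extend the "lease" while finishing up...
    refreshed = service.update_message('tasks', msg.id, msg.pop_receipt,
                                       visibility_timeout=60)
    service.delete_message('tasks', msg.id, refreshed.pop_receipt)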
- :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`) - ''' - - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - _validate_not_none('queue_name', queue_name) - _validate_not_none('message_id', message_id) - _validate_not_none('pop_receipt', pop_receipt) - _validate_not_none('visibility_timeout', visibility_timeout) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True, message_id) - request.query = { - 'popreceipt': _to_str(pop_receipt), - 'visibilitytimeout': _int_to_str(visibility_timeout), - 'timeout': _int_to_str(timeout) - } - - if content is not None: - request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function, - self.key_encryption_key)) - - return self._perform_request(request, _parse_queue_message_from_headers) diff --git a/azure/multiapi/storage/v2017_07_29/queue/sharedaccesssignature.py b/azure/multiapi/storage/v2017_07_29/queue/sharedaccesssignature.py deleted file mode 100644 index 13ad3fa..0000000 --- a/azure/multiapi/storage/v2017_07_29/queue/sharedaccesssignature.py +++ /dev/null @@ -1,81 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ..common.sharedaccesssignature import ( - SharedAccessSignature, - _SharedAccessHelper, -) -from ._constants import X_MS_VERSION - - -class QueueSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating queue shares access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(QueueSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_queue(self, queue_name, permission=None, - expiry=None, start=None, id=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the queue. - Use the returned signature with the sas_token parameter of QueueService. - - :param str queue_name: - Name of queue. - :param QueuePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, add, update, process. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. 
If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name) - - return sas.get_token() diff --git a/azure/multiapi/storage/v2018_03_28/__init__.py b/azure/multiapi/storage/v2018_03_28/__init__.py deleted file mode 100644 index 5b0f8ae..0000000 --- a/azure/multiapi/storage/v2018_03_28/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#-------------------------------------------------------------------------- diff --git a/azure/multiapi/storage/v2018_03_28/blob/__init__.py b/azure/multiapi/storage/v2018_03_28/blob/__init__.py deleted file mode 100644 index eb3e5d0..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from .appendblobservice import AppendBlobService -from .blockblobservice import BlockBlobService -from .models import ( - Container, - ContainerProperties, - Blob, - BlobProperties, - BlobBlock, - BlobBlockList, - PageRange, - ContentSettings, - CopyProperties, - ContainerPermissions, - BlobPermissions, - _LeaseActions, - AppendBlockProperties, - PageBlobProperties, - ResourceProperties, - Include, - SequenceNumberAction, - BlockListType, - PublicAccess, - BlobPrefix, - DeleteSnapshot, -) -from .pageblobservice import PageBlobService diff --git a/azure/multiapi/storage/v2018_03_28/blob/_constants.py b/azure/multiapi/storage/v2018_03_28/blob/_constants.py deleted file mode 100644 index b450d83..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/_constants.py +++ /dev/null @@ -1,14 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -__author__ = 'Microsoft Corp. ' -__version__ = '1.3.1' - -# x-ms-version for storage service. -X_MS_VERSION = '2018-03-28' - -# internal configurations, should not be changed -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 diff --git a/azure/multiapi/storage/v2018_03_28/blob/_deserialization.py b/azure/multiapi/storage/v2018_03_28/blob/_deserialization.py deleted file mode 100644 index 474c78c..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/_deserialization.py +++ /dev/null @@ -1,451 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from azure.common import AzureException -from dateutil import parser - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from ..common._common_conversion import ( - _decode_base64_to_text, - _to_str, - _get_content_md5 -) -from ..common._deserialization import ( - _parse_properties, - _to_int, - _parse_metadata, - _convert_xml_to_signed_identifiers, - _bool, -) -from .models import ( - Container, - Blob, - BlobBlock, - BlobBlockList, - BlobBlockState, - BlobProperties, - PageRange, - ContainerProperties, - AppendBlockProperties, - PageBlobProperties, - ResourceProperties, - BlobPrefix, - AccountInformation, -) -from ._encryption import _decrypt_blob -from ..common.models import _list -from ..common._error import ( - _validate_content_match, - _ERROR_DECRYPTION_FAILURE, -) - - -def _parse_base_properties(response): - ''' - Extracts basic response headers. - ''' - resource_properties = ResourceProperties() - resource_properties.last_modified = parser.parse(response.headers.get('last-modified')) - resource_properties.etag = response.headers.get('etag') - - return resource_properties - - -def _parse_page_properties(response): - ''' - Extracts page response headers. 
- ''' - put_page = PageBlobProperties() - put_page.last_modified = parser.parse(response.headers.get('last-modified')) - put_page.etag = response.headers.get('etag') - put_page.sequence_number = _to_int(response.headers.get('x-ms-blob-sequence-number')) - - return put_page - - -def _parse_append_block(response): - ''' - Extracts append block response headers. - ''' - append_block = AppendBlockProperties() - append_block.last_modified = parser.parse(response.headers.get('last-modified')) - append_block.etag = response.headers.get('etag') - append_block.append_offset = _to_int(response.headers.get('x-ms-blob-append-offset')) - append_block.committed_block_count = _to_int(response.headers.get('x-ms-blob-committed-block-count')) - - return append_block - - -def _parse_snapshot_blob(response, name): - ''' - Extracts snapshot return header. - ''' - snapshot = response.headers.get('x-ms-snapshot') - - return _parse_blob(response, name, snapshot) - - -def _parse_lease(response): - ''' - Extracts lease time and ID return headers. - ''' - lease = {'time': response.headers.get('x-ms-lease-time')} - if lease['time']: - lease['time'] = _to_int(lease['time']) - - lease['id'] = response.headers.get('x-ms-lease-id') - - return lease - - -def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False, - key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, BlobProperties) - - # For range gets, only look at 'x-ms-blob-content-md5' for overall MD5 - content_settings = getattr(props, 'content_settings') - if 'content-range' in response.headers: - if 'x-ms-blob-content-md5' in response.headers: - setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5'])) - else: - delattr(content_settings, 'content_md5') - - if validate_content: - computed_md5 = _get_content_md5(response.body) - _validate_content_match(response.headers['content-md5'], computed_md5) - - if key_encryption_key is not None or key_resolver_function is not None: - try: - response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function, - response, start_offset, end_offset) - except: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - - return Blob(name, snapshot, response.body, props, metadata) - - -def _parse_container(response, name): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, ContainerProperties) - return Container(name, props, metadata) - - -def _convert_xml_to_signed_identifiers_and_access(response): - acl = _convert_xml_to_signed_identifiers(response) - acl.public_access = response.headers.get('x-ms-blob-public-access') - - return acl - - -def _convert_xml_to_containers(response): - ''' - - - string-value - string-value - int-value - - - container-name - - date/time-value - etag - locked | unlocked - available | leased | expired | breaking | broken - infinite | fixed - blob | container - true | false - true | false - - - value - - - - marker-value - - ''' - if response is None or response.body is None: - return None - - containers = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - setattr(containers, 'next_marker', list_element.findtext('NextMarker')) - - containers_element = list_element.find('Containers') - - for container_element in containers_element.findall('Container'): - # Name element - container 
= Container() - container.name = container_element.findtext('Name') - - # Metadata - metadata_root_element = container_element.find('Metadata') - if metadata_root_element is not None: - container.metadata = dict() - for metadata_element in metadata_root_element: - container.metadata[metadata_element.tag] = metadata_element.text - - # Properties - properties_element = container_element.find('Properties') - container.properties.etag = properties_element.findtext('Etag') - container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified')) - container.properties.lease_status = properties_element.findtext('LeaseStatus') - container.properties.lease_state = properties_element.findtext('LeaseState') - container.properties.lease_duration = properties_element.findtext('LeaseDuration') - container.properties.public_access = properties_element.findtext('PublicAccess') - container.properties.has_immutability_policy = properties_element.findtext('HasImmutabilityPolicy') - container.properties.has_legal_hold = properties_element.findtext('HasLegalHold') - - # Add container to list - containers.append(container) - - return containers - - -LIST_BLOBS_ATTRIBUTE_MAP = { - 'Last-Modified': (None, 'last_modified', parser.parse), - 'Etag': (None, 'etag', _to_str), - 'x-ms-blob-sequence-number': (None, 'sequence_number', _to_int), - 'BlobType': (None, 'blob_type', _to_str), - 'Content-Length': (None, 'content_length', _to_int), - 'ServerEncrypted': (None, 'server_encrypted', _bool), - 'Content-Type': ('content_settings', 'content_type', _to_str), - 'Content-Encoding': ('content_settings', 'content_encoding', _to_str), - 'Content-Disposition': ('content_settings', 'content_disposition', _to_str), - 'Content-Language': ('content_settings', 'content_language', _to_str), - 'Content-MD5': ('content_settings', 'content_md5', _to_str), - 'Cache-Control': ('content_settings', 'cache_control', _to_str), - 'LeaseStatus': ('lease', 'status', _to_str), - 'LeaseState': ('lease', 'state', _to_str), - 'LeaseDuration': ('lease', 'duration', _to_str), - 'CopyId': ('copy', 'id', _to_str), - 'CopySource': ('copy', 'source', _to_str), - 'CopyStatus': ('copy', 'status', _to_str), - 'CopyProgress': ('copy', 'progress', _to_str), - 'CopyCompletionTime': ('copy', 'completion_time', _to_str), - 'CopyStatusDescription': ('copy', 'status_description', _to_str), - 'AccessTier': (None, 'blob_tier', _to_str), - 'AccessTierChangeTime': (None, 'blob_tier_change_time', parser.parse), - 'AccessTierInferred': (None, 'blob_tier_inferred', _bool), - 'ArchiveStatus': (None, 'rehydration_status', _to_str), - 'DeletedTime': (None, 'deleted_time', parser.parse), - 'RemainingRetentionDays': (None, 'remaining_retention_days', _to_int), - 'Creation-Time': (None, 'creation_time', parser.parse), -} - - -def _convert_xml_to_blob_list(response): - ''' - - - string-value - string-value - int-value - string-value - - - blob-name - true - date-time-value - - date-time-value - etag - size-in-bytes - blob-content-type - - - - - sequence-number - BlockBlob|PageBlob|AppendBlob - locked|unlocked - available | leased | expired | breaking | broken - infinite | fixed - id - pending | success | aborted | failed - source url - bytes copied/bytes total - datetime - error string - P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot - date-time-value - true - datetime - int - date-time-value - - - value - - - - blob-prefix - - - - - ''' - if response is None or response.body is None: - return None - - blob_list = _list() - 
list_element = ETree.fromstring(response.body) - - setattr(blob_list, 'next_marker', list_element.findtext('NextMarker')) - - blobs_element = list_element.find('Blobs') - blob_prefix_elements = blobs_element.findall('BlobPrefix') - if blob_prefix_elements is not None: - for blob_prefix_element in blob_prefix_elements: - prefix = BlobPrefix() - prefix.name = blob_prefix_element.findtext('Name') - blob_list.append(prefix) - - for blob_element in blobs_element.findall('Blob'): - blob = Blob() - blob.name = blob_element.findtext('Name') - blob.snapshot = blob_element.findtext('Snapshot') - - deleted = blob_element.findtext('Deleted') - if deleted: - blob.deleted = _bool(deleted) - - # Properties - properties_element = blob_element.find('Properties') - if properties_element is not None: - for property_element in properties_element: - info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag) - if info is None: - setattr(blob.properties, property_element.tag, _to_str(property_element.text)) - elif info[0] is None: - setattr(blob.properties, info[1], info[2](property_element.text)) - else: - attr = getattr(blob.properties, info[0]) - setattr(attr, info[1], info[2](property_element.text)) - - # Metadata - metadata_root_element = blob_element.find('Metadata') - if metadata_root_element is not None: - blob.metadata = dict() - for metadata_element in metadata_root_element: - blob.metadata[metadata_element.tag] = metadata_element.text - - # Add blob to list - blob_list.append(blob) - - return blob_list - - -def _convert_xml_to_block_list(response): - ''' - - - - - base64-encoded-block-id - size-in-bytes - - - - - base64-encoded-block-id - size-in-bytes - - - - - Converts xml response to block list class. - ''' - if response is None or response.body is None: - return None - - block_list = BlobBlockList() - - list_element = ETree.fromstring(response.body) - - committed_blocks_element = list_element.find('CommittedBlocks') - if committed_blocks_element is not None: - for block_element in committed_blocks_element.findall('Block'): - block_id = _decode_base64_to_text(block_element.findtext('Name', '')) - block_size = int(block_element.findtext('Size')) - block = BlobBlock(id=block_id, state=BlobBlockState.Committed) - block._set_size(block_size) - block_list.committed_blocks.append(block) - - uncommitted_blocks_element = list_element.find('UncommittedBlocks') - if uncommitted_blocks_element is not None: - for block_element in uncommitted_blocks_element.findall('Block'): - block_id = _decode_base64_to_text(block_element.findtext('Name', '')) - block_size = int(block_element.findtext('Size')) - block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted) - block._set_size(block_size) - block_list.uncommitted_blocks.append(block) - - return block_list - - -def _convert_xml_to_page_ranges(response): - ''' - - - - Start Byte - End Byte - - - Start Byte - End Byte - - - Start Byte - End Byte - - - ''' - if response is None or response.body is None: - return None - - page_list = list() - - list_element = ETree.fromstring(response.body) - - for page_range_element in list_element: - if page_range_element.tag == 'PageRange': - is_cleared = False - elif page_range_element.tag == 'ClearRange': - is_cleared = True - else: - pass # ignore any unrecognized Page Range types - - page_list.append( - PageRange( - int(page_range_element.findtext('Start')), - int(page_range_element.findtext('End')), - is_cleared - ) - ) - - return page_list - - -def _parse_account_information(response): - account_info = AccountInformation() 
- account_info.sku_name = response.headers['x-ms-sku-name'] - account_info.account_kind = response.headers['x-ms-account-kind'] - - return account_info diff --git a/azure/multiapi/storage/v2018_03_28/blob/_download_chunking.py b/azure/multiapi/storage/v2018_03_28/blob/_download_chunking.py deleted file mode 100644 index e68a0e5..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/_download_chunking.py +++ /dev/null @@ -1,178 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import threading - - -def _download_blob_chunks(blob_service, container_name, blob_name, snapshot, - download_size, block_size, progress, start_range, end_range, - stream, max_connections, progress_callback, validate_content, - lease_id, if_modified_since, if_unmodified_since, if_match, - if_none_match, timeout, operation_context): - - downloader_class = _ParallelBlobChunkDownloader if max_connections > 1 else _SequentialBlobChunkDownloader - - downloader = downloader_class( - blob_service, - container_name, - blob_name, - snapshot, - download_size, - block_size, - progress, - start_range, - end_range, - stream, - progress_callback, - validate_content, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout, - operation_context, - ) - - if max_connections > 1: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets())) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - - -class _BlobChunkDownloader(object): - def __init__(self, blob_service, container_name, blob_name, snapshot, download_size, - chunk_size, progress, start_range, end_range, stream, - progress_callback, validate_content, lease_id, if_modified_since, - if_unmodified_since, if_match, if_none_match, timeout, operation_context): - # identifiers for the blob - self.blob_service = blob_service - self.container_name = container_name - self.blob_name = blob_name - self.snapshot = snapshot - - # information on the download range/chunk size - self.chunk_size = chunk_size - self.download_size = download_size - self.start_index = start_range - self.blob_end = end_range - - # the destination that we will write to - self.stream = stream - - # progress related - self.progress_callback = progress_callback - self.progress_total = progress - - # parameters for each get blob operation - self.timeout = timeout - self.operation_context = operation_context - self.validate_content = validate_content - self.lease_id = lease_id - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - - def get_chunk_offsets(self): - index = self.start_index - while index < self.blob_end: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - if chunk_start + self.chunk_size > self.blob_end: - chunk_end = self.blob_end - else: - chunk_end = chunk_start + self.chunk_size - - chunk_data = self._download_chunk(chunk_start, chunk_end).content - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - # should be provided by the 
subclass - def _update_progress(self, length): - pass - - # should be provided by the subclass - def _write_to_stream(self, chunk_data, chunk_start): - pass - - def _download_chunk(self, chunk_start, chunk_end): - response = self.blob_service._get_blob( - self.container_name, - self.blob_name, - snapshot=self.snapshot, - start_range=chunk_start, - end_range=chunk_end - 1, - validate_content=self.validate_content, - lease_id=self.lease_id, - if_modified_since=self.if_modified_since, - if_unmodified_since=self.if_unmodified_since, - if_match=self.if_match, - if_none_match=self.if_none_match, - timeout=self.timeout, - _context=self.operation_context - ) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - self.if_match = response.properties.etag - return response - - -class _ParallelBlobChunkDownloader(_BlobChunkDownloader): - def __init__(self, blob_service, container_name, blob_name, snapshot, download_size, - chunk_size, progress, start_range, end_range, stream, - progress_callback, validate_content, lease_id, if_modified_since, - if_unmodified_since, if_match, if_none_match, timeout, operation_context): - - super(_ParallelBlobChunkDownloader, self).__init__(blob_service, container_name, blob_name, snapshot, - download_size, - chunk_size, progress, start_range, end_range, stream, - progress_callback, validate_content, lease_id, - if_modified_since, - if_unmodified_since, if_match, if_none_match, timeout, - operation_context) - - # for a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() - - # since parallel operations are going on - # it is essential to protect the writing and progress reporting operations - self.stream_lock = threading.Lock() - self.progress_lock = threading.Lock() - - def _update_progress(self, length): - if self.progress_callback is not None: - with self.progress_lock: - self.progress_total += length - total_so_far = self.progress_total - self.progress_callback(total_so_far, self.download_size) - - def _write_to_stream(self, chunk_data, chunk_start): - with self.stream_lock: - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - - -class _SequentialBlobChunkDownloader(_BlobChunkDownloader): - def __init__(self, *args): - super(_SequentialBlobChunkDownloader, self).__init__(*args) - - def _update_progress(self, length): - if self.progress_callback is not None: - self.progress_total += length - self.progress_callback(self.progress_total, self.download_size) - - def _write_to_stream(self, chunk_data, chunk_start): - # chunk_start is ignored in the case of sequential download since we cannot seek the destination stream - self.stream.write(chunk_data) diff --git a/azure/multiapi/storage/v2018_03_28/blob/_encryption.py b/azure/multiapi/storage/v2018_03_28/blob/_encryption.py deleted file mode 100644 index f1e9b54..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/_encryption.py +++ /dev/null @@ -1,187 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from json import ( - dumps, - loads, -) -from os import urandom - -from cryptography.hazmat.primitives.padding import PKCS7 - -from ..common._encryption import ( - _generate_encryption_data_dict, - _generate_AES_CBC_cipher, - _dict_to_encryption_data, - _validate_and_unwrap_cek, - _EncryptionAlgorithm, -) -from ..common._error import ( - _validate_not_none, - _validate_key_encryption_key_wrap, - _ERROR_DATA_NOT_ENCRYPTED, - _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM, -) - - -def _encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the _upload_blob_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def _generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def _decrypt_blob(require_encryption, key_encryption_key, key_resolver, - response, start_offset, end_offset): - ''' - Decrypts the given blob contents and returns only the requested range. 
- - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - _validate_not_none('response', response) - content = response.body - _validate_not_none('content', content) - - try: - encryption_data = _dict_to_encryption_data(loads(response.headers['x-ms-meta-encryptiondata'])) - except: - if require_encryption: - raise ValueError(_ERROR_DATA_NOT_ENCRYPTED) - else: - return content - - if not (encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) - - blob_type = response.headers['x-ms-blob-type'] - - iv = None - unpad = False - start_range, end_range = 0, len(content) - if 'content-range' in response.headers: - content_range = response.headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - start_range = int(content_range[0]) - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def _get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder diff --git a/azure/multiapi/storage/v2018_03_28/blob/_error.py b/azure/multiapi/storage/v2018_03_28/blob/_error.py deleted file mode 100644 index f24edc8..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/_error.py +++ /dev/null @@ -1,29 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \ - 'Invalid page blob size: {0}. ' + \ - 'The size must be aligned to a 512-byte boundary.' 
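# Illustrative sketch (not from the original sources): a minimal object satisfying
# the key-encryption-key (KEK) interface that _encrypt_blob and the service-level
# key_encryption_key field expect (wrap_key, get_key_wrap_algorithm, get_kid).
# The XOR "wrap" is a stand-in only, and unwrap_key is assumed to be the
# counterpart used on the decrypt path; a real KEK would use RSA-OAEP or AES key wrap.
import os

class DemoKeyWrapper(object):
    def __init__(self):
        self._secret = os.urandom(32)

    def wrap_key(self, key):
        return bytes(b ^ s for b, s in zip(key, self._secret))

    def unwrap_key(self, wrapped_key, algorithm):
        return bytes(b ^ s for b, s in zip(wrapped_key, self._secret))

    def get_key_wrap_algorithm(self):
        return 'XOR-DEMO'

    def get_kid(self):
        return 'demo-kek-1'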
- -_ERROR_PAGE_BLOB_START_ALIGNMENT = \ - 'start_range must align with 512 page size' - -_ERROR_PAGE_BLOB_END_ALIGNMENT = \ - 'end_range must align with 512 page size' - -_ERROR_INVALID_BLOCK_ID = \ - 'All blocks in block list need to have valid block ids.' - -_ERROR_INVALID_LEASE_DURATION = \ - "lease_duration param needs to be between 15 and 60 or -1." - -_ERROR_INVALID_LEASE_BREAK_PERIOD = \ - "lease_break_period param needs to be between 0 and 60." - -_ERROR_NO_SINGLE_THREAD_CHUNKING = \ - 'To use blob chunk downloader more than 1 thread must be ' + \ - 'used since get_blob_to_bytes should be called for single threaded ' + \ - 'blob downloads.' diff --git a/azure/multiapi/storage/v2018_03_28/blob/_serialization.py b/azure/multiapi/storage/v2018_03_28/blob/_serialization.py deleted file mode 100644 index 100b408..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/_serialization.py +++ /dev/null @@ -1,118 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from xml.sax.saxutils import escape as xml_escape - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from ..common._common_conversion import ( - _encode_base64, - _str, -) -from ..common._error import ( - _validate_not_none, - _ERROR_START_END_NEEDED_FOR_MD5, - _ERROR_RANGE_TOO_LARGE_FOR_MD5, -) -from ._error import ( - _ERROR_PAGE_BLOB_START_ALIGNMENT, - _ERROR_PAGE_BLOB_END_ALIGNMENT, - _ERROR_INVALID_BLOCK_ID, -) -from io import BytesIO - - -def _get_path(container_name=None, blob_name=None): - ''' - Creates the path to access a blob resource. - - container_name: - Name of container. - blob_name: - The path to the blob. 
- ''' - if container_name and blob_name: - return '/{0}/{1}'.format( - _str(container_name), - _str(blob_name)) - elif container_name: - return '/{0}'.format(_str(container_name)) - else: - return '/' - - -def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if start_range_required or end_range is not None: - _validate_not_none('start_range', start_range) - if end_range_required: - _validate_not_none('end_range', end_range) - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT) - if end_range is not None and end_range % 512 != 511: - raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT) - - # Format based on whether end_range is present - request.headers = request.headers or {} - if end_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - request.headers['x-ms-range'] = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5) - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5) - - request.headers['x-ms-range-get-content-md5'] = 'true' - - -def _convert_block_list_to_xml(block_id_list): - ''' - - - first-base64-encoded-block-id - second-base64-encoded-block-id - third-base64-encoded-block-id - - - Convert a block list to xml to send. - - block_id_list: - A list of BlobBlock containing the block ids and block state that are used in put_block_list. - Only get block from latest blocks. - ''' - if block_id_list is None: - return '' - - block_list_element = ETree.Element('BlockList') - - # Enabled - for block in block_id_list: - if block.id is None: - raise ValueError(_ERROR_INVALID_BLOCK_ID) - id = xml_escape(_str(format(_encode_base64(block.id)))) - ETree.SubElement(block_list_element, block.state).text = id - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - # return xml value - return output diff --git a/azure/multiapi/storage/v2018_03_28/blob/_upload_chunking.py b/azure/multiapi/storage/v2018_03_28/blob/_upload_chunking.py deleted file mode 100644 index 6da0858..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/_upload_chunking.py +++ /dev/null @@ -1,496 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
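`_convert_block_list_to_xml` above serializes a block list into the `<BlockList>` document shown in its docstring: one child element per block, named after the block state and containing the base64-encoded block id. A simplified standalone sketch of that serialization for two 'Latest' blocks (a namedtuple stands in for `BlobBlock`, and the XML-escaping step is omitted):

from base64 import b64encode
from collections import namedtuple
from io import BytesIO
from xml.etree import ElementTree as ETree

Block = namedtuple('Block', ['id', 'state'])   # stand-in for models.BlobBlock


def block_list_xml(blocks):
    root = ETree.Element('BlockList')
    for block in blocks:
        encoded = b64encode(block.id.encode('utf-8')).decode('utf-8')
        ETree.SubElement(root, block.state).text = encoded
    stream = BytesIO()
    ETree.ElementTree(root).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
    return stream.getvalue()


print(block_list_xml([Block('block-00000', 'Latest'), Block('block-00001', 'Latest')]))
# -> b"<?xml version='1.0' encoding='utf-8'?>\n<BlockList><Latest>...</Latest><Latest>...</Latest></BlockList>"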
-# -------------------------------------------------------------------------- -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock - -from math import ceil - -from ..common._common_conversion import _encode_base64 -from ..common._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM -from ..common._serialization import ( - url_quote, - _get_data_bytes_only, - _len_plus -) -from ._constants import ( - _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE -) -from ._encryption import ( - _get_blob_encryptor_and_padder, -) -from .models import BlobBlock - - -def _upload_blob_chunks(blob_service, container_name, blob_name, - blob_size, block_size, stream, max_connections, - progress_callback, validate_content, lease_id, uploader_class, - maxsize_condition=None, if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None, - content_encryption_key=None, initialization_vector=None, resource_properties=None): - encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector, - uploader_class is not _PageBlobChunkUploader) - - uploader = uploader_class( - blob_service, - container_name, - blob_name, - blob_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - lease_id, - timeout, - encryptor, - padder - ) - - uploader.maxsize_condition = maxsize_condition - - # Access conditions do not work with parallelism - if max_connections > 1: - uploader.if_match = uploader.if_none_match = uploader.if_modified_since = uploader.if_unmodified_since = None - else: - uploader.if_match = if_match - uploader.if_none_match = if_none_match - uploader.if_modified_since = if_modified_since - uploader.if_unmodified_since = if_unmodified_since - - if progress_callback is not None: - progress_callback(0, blob_size) - - if max_connections > 1: - import concurrent.futures - from threading import BoundedSemaphore - - ''' - Ensures we bound the chunking so we only buffer and submit 'max_connections' amount of work items to the executor. - This is necessary as the executor queue will keep accepting submitted work items, which results in buffering all the blocks if - the max_connections + 1 ensures the next chunk is already buffered and ready for when the worker thread is available. - ''' - chunk_throttler = BoundedSemaphore(max_connections + 1) - - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - futures = [] - running_futures = [] - - # Check for exceptions and fail fast. - for chunk in uploader.get_chunk_streams(): - for f in running_futures: - if f.done(): - if f.exception(): - raise f.exception() - else: - running_futures.remove(f) - - chunk_throttler.acquire() - future = executor.submit(uploader.process_chunk, chunk) - - # Calls callback upon completion (even if the callback was added after the Future task is done). - future.add_done_callback(lambda x: chunk_throttler.release()) - futures.append(future) - running_futures.append(future) - - # result() will wait until completion and also raise any exceptions that may have been set. 
- range_ids = [f.result() for f in futures] - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - - if resource_properties: - resource_properties.last_modified = uploader.last_modified - resource_properties.etag = uploader.etag - - return range_ids - - -def _upload_blob_substream_blocks(blob_service, container_name, blob_name, - blob_size, block_size, stream, max_connections, - progress_callback, validate_content, lease_id, uploader_class, - maxsize_condition=None, if_match=None, timeout=None): - uploader = uploader_class( - blob_service, - container_name, - blob_name, - blob_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - lease_id, - timeout, - None, - None - ) - - uploader.maxsize_condition = maxsize_condition - - # ETag matching does not work with parallelism as a ranged upload may start - # before the previous finishes and provides an etag - uploader.if_match = if_match if not max_connections > 1 else None - - if progress_callback is not None: - progress_callback(0, blob_size) - - if max_connections > 1: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks())) - else: - range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()] - - return range_ids - - -class _BlobChunkUploader(object): - def __init__(self, blob_service, container_name, blob_name, blob_size, - chunk_size, stream, parallel, progress_callback, - validate_content, lease_id, timeout, encryptor, padder): - self.blob_service = blob_service - self.container_name = container_name - self.blob_name = blob_name - self.blob_size = blob_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - self.progress_callback = progress_callback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - self.validate_content = validate_content - self.lease_id = lease_id - self.timeout = timeout - self.encryptor = encryptor - self.padder = padder - self.last_modified = None - self.etag = None - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.blob_size: - read_size = min(self.chunk_size - len(data), self.blob_size - (index + len(data))) - temp = self.stream.read(read_size) - temp = _get_data_bytes_only('temp', temp) - data += temp - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. 
- if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if len(data) > 0: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_callback is not None: - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - else: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.blob_size) - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.blob_size - - if blob_length is None: - blob_length = _len_plus(self.stream) - if blob_length is None: - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream')) - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - yield ('BlockId{}'.format("%05d" % i), - _SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size, - lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class _BlockBlobChunkUploader(_BlobChunkUploader): - def _upload_chunk(self, chunk_offset, chunk_data): - block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset))) - self.blob_service._put_block( - self.container_name, - self.blob_name, - chunk_data, - block_id, - validate_content=self.validate_content, - lease_id=self.lease_id, - timeout=self.timeout, - ) - return BlobBlock(block_id) - - def _upload_substream_block(self, block_id, block_stream): - try: - self.blob_service._put_block( - self.container_name, - self.blob_name, - block_stream, - block_id, - validate_content=self.validate_content, - lease_id=self.lease_id, - timeout=self.timeout, - ) - finally: - block_stream.close() - return BlobBlock(block_id) - - -class _PageBlobChunkUploader(_BlobChunkUploader): - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte != 0 and each_byte != b'\x00': - return False - return True - - def _upload_chunk(self, chunk_start, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_start + len(chunk_data) - 1 - resp = self.blob_service._update_page( - self.container_name, - 
self.blob_name, - chunk_data, - chunk_start, - chunk_end, - validate_content=self.validate_content, - lease_id=self.lease_id, - if_match=self.if_match, - timeout=self.timeout, - ) - - if not self.parallel: - self.if_match = resp.etag - - self.set_response_properties(resp) - - -class _AppendBlobChunkUploader(_BlobChunkUploader): - def _upload_chunk(self, chunk_offset, chunk_data): - if not hasattr(self, 'current_length'): - resp = self.blob_service.append_block( - self.container_name, - self.blob_name, - chunk_data, - validate_content=self.validate_content, - lease_id=self.lease_id, - maxsize_condition=self.maxsize_condition, - timeout=self.timeout, - if_modified_since=self.if_modified_since, - if_unmodified_since=self.if_unmodified_since, - if_match=self.if_match, - if_none_match=self.if_none_match - ) - - self.current_length = resp.append_offset - else: - resp = self.blob_service.append_block( - self.container_name, - self.blob_name, - chunk_data, - validate_content=self.validate_content, - lease_id=self.lease_id, - maxsize_condition=self.maxsize_condition, - appendpos_condition=self.current_length + chunk_offset, - timeout=self.timeout, - ) - - self.set_response_properties(resp) - - -class _SubStream(IOBase): - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE \ - else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - self._current_buffer_start = 0 - self._current_buffer_size = 0 - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, n): - if self.closed: - raise ValueError("Stream is closed.") - - # adjust if out of bounds - if n + self._position >= self._length: - n = self._length - self._position - - # return fast - if n == 0 or self._buffer.closed: - return b'' - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(n) - bytes_read = len(read_buffer) - bytes_remaining = n - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_connections > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the 
data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = - offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False diff --git a/azure/multiapi/storage/v2018_03_28/blob/appendblobservice.py b/azure/multiapi/storage/v2018_03_28/blob/appendblobservice.py deleted file mode 100644 index 8369cb3..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/appendblobservice.py +++ /dev/null @@ -1,661 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
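The chunked-upload helpers above keep memory bounded by pairing a `BoundedSemaphore(max_connections + 1)` with a `ThreadPoolExecutor`: the producer loop blocks once enough chunks are buffered or in flight, and each completed upload frees a slot. A minimal standalone sketch of that pattern; the function names are illustrative:

import concurrent.futures
from threading import BoundedSemaphore


def upload_chunks(chunks, upload_one, max_connections=4):
    # At most max_connections + 1 chunks are buffered/in flight at any time:
    # acquire() blocks the producer, and each finished future releases a slot.
    throttler = BoundedSemaphore(max_connections + 1)
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
        for chunk in chunks:
            throttler.acquire()
            future = executor.submit(upload_one, chunk)
            future.add_done_callback(lambda _: throttler.release())
            futures.append(future)
        # result() waits for completion and re-raises any worker exception.
        return [f.result() for f in futures]


# Example run with a stand-in "upload" that just labels each chunk.
block_ids = upload_chunks(range(10), lambda index: 'block-%05d' % index)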
-# -------------------------------------------------------------------------- -import sys -from os import path - -from ..common._common_conversion import ( - _to_str, - _int_to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_data_bytes_only, - _add_metadata_headers, -) -from ._deserialization import ( - _parse_append_block, - _parse_base_properties, -) -from ._serialization import ( - _get_path, -) -from ._upload_chunking import ( - _AppendBlobChunkUploader, - _upload_blob_chunks, -) -from .baseblobservice import BaseBlobService -from .models import ( - _BlobTypes, - ResourceProperties -) - -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - - -class AppendBlobService(BaseBlobService): - ''' - An append blob is comprised of blocks and is optimized for append operations. - When you modify an append blob, blocks are added to the end of the blob only, - via the append_block operation. Updating or deleting of existing blocks is not - supported. Unlike a block blob, an append blob does not expose its block IDs. - - Each block in an append blob can be a different size, up to a maximum of 4 MB, - and an append blob can include up to 50,000 blocks. The maximum size of an - append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks). - - :ivar int MAX_BLOCK_SIZE: - The size of the blocks put by append_blob_from_* methods. Smaller blocks - may be put if there is less data provided. The maximum block size the service - supports is 4MB. - ''' - MAX_BLOCK_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, - protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None, - connection_string=None, socket_timeout=None, token_credential=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. 
- :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - :param token_credential: - A token credential used to authenticate HTTPS requests. The token value - should be updated before its expiration. - :type `~..common.TokenCredential` - ''' - self.blob_type = _BlobTypes.AppendBlob - super(AppendBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout, token_credential) - - def create_blob(self, container_name, blob_name, content_settings=None, - metadata=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Creates a blob or overrides an existing blob. Use if_none_match=* to - prevent overriding an existing blob. - - See create_blob_from_* for high level - functions that handle the creation and upload of large blobs with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to - perform the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: ETag and last modified properties for the updated Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - return self._perform_request(request, _parse_base_properties) - - def append_block(self, container_name, blob_name, block, - validate_content=False, maxsize_condition=None, - appendpos_condition=None, - lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Commits a new block of data to the end of an existing append blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param bytes block: - Content of the block in bytes. - :param bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - ETag, last modified, append offset, and committed block count - properties for the updated Append Blob - :rtype: :class:`~azure.storage.blob.models.AppendBlockProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block', block) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'appendblock', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-condition-maxsize': _to_str(maxsize_condition), - 'x-ms-blob-condition-appendpos': _to_str(appendpos_condition), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - request.body = _get_data_bytes_only('block', block) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_append_block) - - # ----Convenience APIs---------------------------------------------- - - def append_blob_from_path( - self, container_name, blob_name, file_path, validate_content=False, - maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None): - ''' - Appends to the content of an existing blob from a file path, with automatic - chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. 
- :type progress_callback: func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetime will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetime will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :return: ETag and last modified properties for the Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - return self.append_blob_from_stream( - container_name, - blob_name, - stream, - count=count, - validate_content=validate_content, - maxsize_condition=maxsize_condition, - progress_callback=progress_callback, - lease_id=lease_id, - timeout=timeout, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match) - - def append_blob_from_bytes( - self, container_name, blob_name, blob, index=0, count=None, - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None): - ''' - Appends to the content of an existing blob from an array of bytes, with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. 
- :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetime will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetime will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. 
- :return: ETag and last modified properties for the Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_not_none('index', index) - _validate_type_bytes('blob', blob) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - return self.append_blob_from_stream( - container_name, - blob_name, - stream, - count=count, - validate_content=validate_content, - maxsize_condition=maxsize_condition, - lease_id=lease_id, - progress_callback=progress_callback, - timeout=timeout, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match) - - def append_blob_from_text( - self, container_name, blob_name, text, encoding='utf-8', - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None): - ''' - Appends to the content of an existing blob from str/unicode, with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str text: - Text to upload to the blob. - :param str encoding: - Python encoding to use to convert the text to bytes. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetime will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetime will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :return: ETag and last modified properties for the Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('text', text) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - return self.append_blob_from_bytes( - container_name, - blob_name, - text, - index=0, - count=len(text), - validate_content=validate_content, - maxsize_condition=maxsize_condition, - lease_id=lease_id, - progress_callback=progress_callback, - timeout=timeout, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match) - - def append_blob_from_stream( - self, container_name, blob_name, stream, count=None, - validate_content=False, maxsize_condition=None, progress_callback=None, - lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None): - ''' - Appends to the content of an existing blob from a file/stream, with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param io.IOBase stream: - Opened stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param int maxsize_condition: - Conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. 
This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetime will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetime will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :return: ETag and last modified properties for the Append Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - # _upload_blob_chunks returns the block ids for block blobs so resource_properties - # is passed as a parameter to get the last_modified and etag for page and append blobs. - # this info is not needed for block_blobs since _put_block_list is called after which gets this info - resource_properties = ResourceProperties() - _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=1, # upload not easily parallelizable - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_AppendBlobChunkUploader, - maxsize_condition=maxsize_condition, - timeout=timeout, - resource_properties=resource_properties, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match - ) - - return resource_properties diff --git a/azure/multiapi/storage/v2018_03_28/blob/baseblobservice.py b/azure/multiapi/storage/v2018_03_28/blob/baseblobservice.py deleted file mode 100644 index 8373938..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/baseblobservice.py +++ /dev/null @@ -1,3279 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
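Taken together, the removed AppendBlobService exposes a small append workflow: create_blob (optionally guarded with if_none_match='*' so an existing blob is not overwritten) followed by append_block or one of the append_blob_from_* convenience methods. A hedged usage sketch; the credentials, container and blob names are placeholders, and the import path mirrors the module deleted above:

from azure.multiapi.storage.v2018_03_28.blob.appendblobservice import AppendBlobService

service = AppendBlobService(account_name='<account>', account_key='<key>')

# if_none_match='*' makes create_blob fail rather than overwrite an existing blob.
service.create_blob('logs', 'app.log', if_none_match='*')

# Appends are chunked into MAX_BLOCK_SIZE (4 MB) blocks under the covers.
service.append_blob_from_text('logs', 'app.log', u'first line\n')
service.append_block('logs', 'app.log', b'second line\n', validate_content=True)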
-# -------------------------------------------------------------------------- -import sys -from abc import ABCMeta - -from azure.common import AzureHttpError - -from ..common._auth import ( - _StorageSASAuthentication, - _StorageSharedKeyAuthentication, - _StorageNoAuthentication, -) -from ..common._common_conversion import ( - _int_to_str, - _to_str, - _datetime_to_utc_string, -) -from ..common._connection import _ServiceParameters -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._deserialization import ( - _convert_xml_to_service_properties, - _parse_metadata, - _parse_properties, - _convert_xml_to_service_stats, - _parse_length_from_content_range, -) -from ..common._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _validate_decryption_required, - _validate_access_policies, - _ERROR_PARALLEL_NOT_SEEKABLE, -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_request_body, - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, - _add_metadata_headers, -) -from ..common.models import ( - Services, - ListGenerator, - _OperationContext, -) -from .sharedaccesssignature import ( - BlobSharedAccessSignature, -) -from ..common.storageclient import StorageClient -from ._deserialization import ( - _convert_xml_to_containers, - _parse_blob, - _convert_xml_to_blob_list, - _parse_container, - _parse_snapshot_blob, - _parse_lease, - _convert_xml_to_signed_identifiers_and_access, - _parse_base_properties, - _parse_account_information, -) -from ._download_chunking import _download_blob_chunks -from ._error import ( - _ERROR_INVALID_LEASE_DURATION, - _ERROR_INVALID_LEASE_BREAK_PERIOD, -) -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from .models import ( - BlobProperties, - _LeaseActions, - ContainerPermissions, - BlobPermissions, -) - -from ._constants import ( - X_MS_VERSION, - __version__ as package_version, -) - -_CONTAINER_ALREADY_EXISTS_ERROR_CODE = 'ContainerAlreadyExists' -_BLOB_NOT_FOUND_ERROR_CODE = 'BlobNotFound' -_CONTAINER_NOT_FOUND_ERROR_CODE = 'ContainerNotFound' - -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - - -class BaseBlobService(StorageClient): - ''' - This is the main class managing Blob resources. - - The Blob service stores text and binary data as blobs in the cloud. - The Blob service offers the following three resources: the storage account, - containers, and blobs. Within your storage account, containers provide a - way to organize sets of blobs. For more information please see: - https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx - - :ivar int MAX_SINGLE_GET_SIZE: - The size of the first range get performed by get_blob_to_* methods if - max_connections is greater than 1. Less data will be returned if the - blob is smaller than this. - :ivar int MAX_CHUNK_GET_SIZE: - The size of subsequent range gets performed by get_blob_to_* methods if - max_connections is greater than 1 and the blob is larger than MAX_SINGLE_GET_SIZE. - Less data will be returned if the remainder of the blob is smaller than - this. If this is set to larger than 4MB, content_validation will throw an - error if enabled. However, if content_validation is not desired a size - greater than 4MB may be optimal. Setting this below 4MB is not recommended. - :ivar object key_encryption_key: - The key-encryption-key optionally provided by the user. 
If provided, will be used to - encrypt/decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR the resolver must be provided. - If both are provided, the resolver will take precedence. - Must implement the following methods for APIs requiring encryption: - wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - Must implement the following methods for APIs requiring decryption: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :ivar function key_resolver_function(kid): - A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR - the resolver must be provided. If both are provided, the resolver will take precedence. - It uses the kid string to return a key-encryption-key implementing the interface defined above. - :ivar bool require_encryption: - A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and - successfully read from the queue are/were encrypted while on the server. If this flag is set, all required - parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver. - ''' - - __metaclass__ = ABCMeta - MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024 - MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, - protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None, - connection_string=None, socket_timeout=None, token_credential=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. 
See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - :param token_credential: - A token credential used to authenticate HTTPS requests. The token value - should be updated before its expiration. - :type `~..common.TokenCredential` - ''' - service_params = _ServiceParameters.get_service_parameters( - 'blob', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - token_credential=token_credential, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=custom_domain, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(BaseBlobService, self).__init__(service_params) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - self.is_emulated - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - elif self.token_credential: - self.authentication = self.token_credential - else: - self.authentication = _StorageNoAuthentication() - - self.require_encryption = False - self.key_encryption_key = None - self.key_resolver_function = None - self._X_MS_VERSION = X_MS_VERSION - self._update_user_agent_string(package_version) - - def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None, snapshot=None): - ''' - Creates the url to access a blob. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str protocol: - Protocol to use: 'http' or 'https'. If not specified, uses the - protocol specified when BaseBlobService was initialized. - :param str sas_token: - Shared access signature token created with - generate_shared_access_signature. - :param str snapshot: - An string value that uniquely identifies the snapshot. The value of - this query parameter indicates the snapshot version. - :return: blob access URL. - :rtype: str - ''' - - url = '{}://{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - container_name, - blob_name, - ) - - if snapshot and sas_token: - url = '{}?snapshot={}&{}'.format(url, snapshot, sas_token) - elif snapshot: - url = '{}?snapshot={}'.format(url, snapshot) - elif sas_token: - url = '{}?{}'.format(url, sas_token) - - return url - - def make_container_url(self, container_name, protocol=None, sas_token=None): - ''' - Creates the url to access a container. - - :param str container_name: - Name of container. - :param str protocol: - Protocol to use: 'http' or 'https'. If not specified, uses the - protocol specified when BaseBlobService was initialized. - :param str sas_token: - Shared access signature token created with - generate_shared_access_signature. - :return: container access URL. - :rtype: str - ''' - - url = '{}://{}/{}?restype=container'.format( - protocol or self.protocol, - self.primary_endpoint, - container_name, - ) - - if sas_token: - url = '{}&{}'.format(url, sas_token) - - return url - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the blob service. - Use the returned signature with the sas_token parameter of any BlobService. 
- - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = BlobSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.BLOB, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_container_shared_access_signature(self, container_name, - permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. 
- :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = BlobSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_container( - container_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def generate_blob_shared_access_signature( - self, container_name, blob_name, permission=None, - expiry=None, start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the blob. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param BlobPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_container_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = BlobSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_blob( - container_name, - blob_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def list_containers(self, prefix=None, num_results=None, include_metadata=False, - marker=None, timeout=None): - ''' - Returns a generator to list the containers under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - containers, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only containers whose names - begin with the specified prefix. - :param int num_results: - Specifies the maximum number of containers to return. 
A single list
- request may return up to 1000 containers and potentially a continuation
- token which should be followed to get additional results.
- :param bool include_metadata:
- Specifies that container metadata be returned in the response.
- :param str marker:
- An opaque continuation token. This value can be retrieved from the
- next_marker field of a previous generator object if num_results was
- specified and that generator has finished enumerating results. If
- specified, this generator will begin returning results from the point
- where the previous generator stopped.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- '''
- include = 'metadata' if include_metadata else None
- operation_context = _OperationContext(location_lock=True)
- kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
- 'include': include, 'timeout': timeout, '_context': operation_context}
- resp = self._list_containers(**kwargs)
-
- return ListGenerator(resp, self._list_containers, (), kwargs)
-
- def _list_containers(self, prefix=None, marker=None, max_results=None,
- include=None, timeout=None, _context=None):
- '''
- Returns a list of the containers under the specified account.
-
- :param str prefix:
- Filters the results to return only containers whose names
- begin with the specified prefix.
- :param str marker:
- A string value that identifies the portion of the list
- to be returned with the next list operation. The operation returns
- a next_marker value within the response body if the list returned was
- not complete. The marker value may then be used in a subsequent
- call to request the next set of list items. The marker value is
- opaque to the client.
- :param int max_results:
- Specifies the maximum number of containers to return. A single list
- request may return up to 1000 containers and potentially a continuation
- token which should be followed to get additional results.
- :param str include:
- Include this parameter to specify that the container's
- metadata be returned as part of the response body. Set this
- parameter to string 'metadata' to get container's metadata.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- '''
- request = HTTPRequest()
- request.method = 'GET'
- request.host_locations = self._get_host_locations(secondary=True)
- request.path = _get_path()
- request.query = {
- 'comp': 'list',
- 'prefix': _to_str(prefix),
- 'marker': _to_str(marker),
- 'maxresults': _int_to_str(max_results),
- 'include': _to_str(include),
- 'timeout': _int_to_str(timeout)
- }
-
- return self._perform_request(request, _convert_xml_to_containers, operation_context=_context)
-
- def create_container(self, container_name, metadata=None,
- public_access=None, fail_on_exist=False, timeout=None):
- '''
- Creates a new container under the specified account. If the container
- with the same name already exists, the operation fails if
- fail_on_exist is True.
-
- :param str container_name:
- Name of container to create.
- :param metadata:
- A dict with name_value pairs to associate with the
- container as metadata. Example:{'Category':'test'}
- :type metadata: dict(str, str)
- :param ~azure.storage.blob.models.PublicAccess public_access:
- Possible values include: container, blob.
- :param bool fail_on_exist:
- Specify whether to throw an exception when the container exists.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- :return: True if container is created, False if container already exists. 
- :rtype: bool - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-public-access': _to_str(public_access) - } - _add_metadata_headers(metadata, request) - - if not fail_on_exist: - try: - self._perform_request(request, expected_errors=[_CONTAINER_ALREADY_EXISTS_ERROR_CODE]) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_container_properties(self, container_name, lease_id=None, timeout=None): - ''' - Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :param str container_name: - Name of existing container. - :param str lease_id: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: properties for the specified container within a container object. - :rtype: :class:`~azure.storage.blob.models.Container` - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _parse_container, [container_name]) - - def get_container_metadata(self, container_name, lease_id=None, timeout=None): - ''' - Returns all user-defined metadata for the specified container. - - :param str container_name: - Name of existing container. - :param str lease_id: - If specified, get_container_metadata only succeeds if the - container's lease is active and matches this ID. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the container metadata name, value pairs. - :rtype: dict(str, str) - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _parse_metadata) - - def set_container_metadata(self, container_name, metadata=None, - lease_id=None, if_modified_since=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param str container_name: - Name of existing container. - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict(str, str) - :param str lease_id: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. 
- :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Container - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'x-ms-lease-id': _to_str(lease_id), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_base_properties) - - def get_container_acl(self, container_name, lease_id=None, timeout=None): - ''' - Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :param str container_name: - Name of existing container. - :param lease_id: - If specified, get_container_acl only succeeds if the - container's lease is active and matches this ID. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A dictionary of access policies associated with the container. dict of str to - :class:`..common.models.AccessPolicy` and a public_access property - if public access is turned on - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _convert_xml_to_signed_identifiers_and_access) - - def set_container_acl(self, container_name, signed_identifiers=None, - public_access=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param str container_name: - Name of existing container. - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~..common.models.AccessPolicy`) - :param ~azure.storage.blob.models.PublicAccess public_access: - Possible values include: container, blob. - :param str lease_id: - If specified, set_container_acl only succeeds if the - container's lease is active and matches this ID. - :param datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :param datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Container - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-public-access': _to_str(public_access), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'x-ms-lease-id': _to_str(lease_id), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - - return self._perform_request(request, _parse_base_properties) - - def delete_container(self, container_name, fail_not_exist=False, - lease_id=None, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :param str container_name: - Name of container to delete. - :param bool fail_not_exist: - Specify whether to throw an exception when the container doesn't - exist. - :param str lease_id: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if container is deleted, False container doesn't exist. 
- :rtype: bool - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - } - - if not fail_not_exist: - try: - self._perform_request(request, expected_errors=[_CONTAINER_NOT_FOUND_ERROR_CODE]) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def _lease_container_impl( - self, container_name, lease_action, lease_id, lease_duration, - lease_break_period, proposed_lease_id, if_modified_since, - if_unmodified_since, timeout): - ''' - Establishes and manages a lease on a container. - The Lease Container operation can be called in one of five modes - Acquire, to request a new lease - Renew, to renew an existing lease - Change, to change the ID of an existing lease - Release, to free the lease if it is no longer needed so that another - client may immediately acquire a lease against the container - Break, to end the lease but ensure that another client cannot acquire - a new lease until the current lease period has expired - - :param str container_name: - Name of existing container. - :param str lease_action: - Possible _LeaseActions values: acquire|renew|release|break|change - :param str lease_id: - Required if the container has an active lease. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. For backwards compatibility, the default is - 60, and the value is only used on an acquire operation. - :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param str proposed_lease_id: - Optional for Acquire, required for Change. Proposed lease ID, in a - GUID string format. The Blob service returns 400 (Invalid request) - if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - Response headers returned from the service call. - :rtype: dict(str, str) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('lease_action', lease_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'lease', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-lease-action': _to_str(lease_action), - 'x-ms-lease-duration': _to_str(lease_duration), - 'x-ms-lease-break-period': _to_str(lease_break_period), - 'x-ms-proposed-lease-id': _to_str(proposed_lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - } - - return self._perform_request(request, _parse_lease) - - def acquire_container_lease( - self, container_name, lease_duration=-1, proposed_lease_id=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param str container_name: - Name of existing container. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the newly created lease. - :return: str - ''' - _validate_not_none('lease_duration', lease_duration) - if lease_duration != -1 and \ - (lease_duration < 15 or lease_duration > 60): - raise ValueError(_ERROR_INVALID_LEASE_DURATION) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Acquire, - None, # lease_id - lease_duration, - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - timeout) - return lease['id'] - - def renew_container_lease( - self, container_name, lease_id, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Renews the lease. The lease can be renewed if the lease ID specified - matches that associated with the container. 
Note that - the lease may be renewed even if it has expired as long as the container - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the renewed lease. - :return: str - ''' - _validate_not_none('lease_id', lease_id) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Renew, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - return lease['id'] - - def release_container_lease( - self, container_name, lease_id, if_modified_since=None, - if_unmodified_since=None, timeout=None): - ''' - Release the lease. The lease may be released if the lease_id specified matches - that associated with the container. Releasing the lease allows another client - to immediately acquire the lease for the container as soon as the release is complete. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('lease_id', lease_id) - - self._lease_container_impl(container_name, - _LeaseActions.Release, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - - def break_container_lease( - self, container_name, lease_break_period=None, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Break the lease, if the container has an active lease. Once a lease is - broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. 
When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param str container_name: - Name of existing container. - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :return: int - ''' - if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60): - raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD) - - lease = self._lease_container_impl(container_name, - _LeaseActions.Break, - None, # lease_id - None, # lease_duration - lease_break_period, - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - timeout) - return lease['time'] - - def change_container_lease( - self, container_name, lease_id, proposed_lease_id, - if_modified_since=None, if_unmodified_since=None, timeout=None): - ''' - Change the lease ID of an active lease. A change must include the current - lease ID and a new lease ID. - - :param str container_name: - Name of existing container. - :param str lease_id: - Lease ID for active lease. - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
-
- Specify this header to perform the operation only if
- the resource has not been modified since the specified date/time.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- '''
- _validate_not_none('lease_id', lease_id)
-
- self._lease_container_impl(container_name,
- _LeaseActions.Change,
- lease_id,
- None, # lease_duration
- None, # lease_break_period
- proposed_lease_id,
- if_modified_since,
- if_unmodified_since,
- timeout)
-
- def list_blobs(self, container_name, prefix=None, num_results=None, include=None,
- delimiter=None, marker=None, timeout=None):
- '''
- Returns a generator to list the blobs under the specified container.
- The generator will lazily follow the continuation tokens returned by
- the service and stop when all blobs have been returned or num_results is reached.
-
- If num_results is specified and the account has more than that number of
- blobs, the generator will have a populated next_marker field once it
- finishes. This marker can be used to create a new generator if more
- results are desired.
-
- :param str container_name:
- Name of existing container.
- :param str prefix:
- Filters the results to return only blobs whose names
- begin with the specified prefix.
- :param int num_results:
- Specifies the maximum number of blobs to return,
- including all :class:`BlobPrefix` elements. If the request does not specify
- num_results or specifies a value greater than 5,000, the server will
- return up to 5,000 items. Setting num_results to a value less than
- or equal to zero results in error response code 400 (Bad Request).
- :param ~azure.storage.blob.models.Include include:
- Specifies one or more additional datasets to include in the response.
- :param str delimiter:
- When the request includes this parameter, the operation
- returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the
- result list that acts as a placeholder for all blobs whose names begin
- with the same substring up to the appearance of the delimiter character.
- The delimiter may be a single character or a string.
- :param str marker:
- An opaque continuation token. This value can be retrieved from the
- next_marker field of a previous generator object if num_results was
- specified and that generator has finished enumerating results. If
- specified, this generator will begin returning results from the point
- where the previous generator stopped.
- :param int timeout:
- The timeout parameter is expressed in seconds.
- '''
- operation_context = _OperationContext(location_lock=True)
- args = (container_name,)
- kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
- 'include': include, 'delimiter': delimiter, 'timeout': timeout,
- '_context': operation_context}
- resp = self._list_blobs(*args, **kwargs)
-
- return ListGenerator(resp, self._list_blobs, args, kwargs)
-
- def _list_blobs(self, container_name, prefix=None, marker=None,
- max_results=None, include=None, delimiter=None, timeout=None,
- _context=None):
- '''
- Returns the list of blobs under the specified container.
-
- :param str container_name:
- Name of existing container.
- :param str prefix:
- Filters the results to return only blobs whose names
- begin with the specified prefix.
- :param str marker:
- A string value that identifies the portion of the list
- to be returned with the next list operation. The operation returns
- a next_marker value within the response body if the list returned was
- not complete. 
The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of blobs to return, - including all :class:`~azure.storage.blob.models.BlobPrefix` elements. If the request does not specify - max_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting max_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param str include: - Specifies one or more datasets to include in the - response. To specify more than one of these options on the URI, - you must separate each option with a comma. Valid values are: - snapshots: - Specifies that snapshots should be included in the - enumeration. Snapshots are listed from oldest to newest in - the response. - metadata: - Specifies that blob metadata be returned in the response. - uncommittedblobs: - Specifies that blobs for which blocks have been uploaded, - but which have not been committed using Put Block List - (REST API), be included in the response. - copy: - Version 2012-02-12 and newer. Specifies that metadata - related to any current or previous Copy Blob operation - should be included in the response. - deleted: - Version 2017-07-29 and newer. Specifies that soft deleted blobs - which are retained by the service should be included - in the response. - :param str delimiter: - When the request includes this parameter, the operation - returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the response body that acts as a - placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character. The - delimiter may be a single character or a string. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name) - request.query = { - 'restype': 'container', - 'comp': 'list', - 'prefix': _to_str(prefix), - 'delimiter': _to_str(delimiter), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_blob_list, operation_context=_context) - - def get_blob_account_information(self, container_name=None, blob_name=None, timeout=None): - """ - Gets information related to the storage account. - The information can also be retrieved if the user has a SAS to a container or blob. - - :param str container_name: - Name of existing container. - Optional, unless using a SAS token to a specific container or blob, in which case it's required. - :param str blob_name: - Name of existing blob. - Optional, unless using a SAS token to a specific blob, in which case it's required. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The :class:`~azure.storage.blob.models.AccountInformation`. 
- """ - request = HTTPRequest() - request.method = 'HEAD' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'restype': 'account', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_account_information) - - def get_blob_service_stats(self, timeout=None): - ''' - Retrieves statistics related to replication for the Blob service. It is - only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: :class:`~..common.models.ServiceStats` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(primary=False, secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_stats) - - def set_blob_service_properties( - self, logging=None, hour_metrics=None, minute_metrics=None, - cors=None, target_version=None, timeout=None, delete_retention_policy=None, static_website=None): - ''' - Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. If an element (ex Logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param logging: - Groups the Azure Analytics Logging settings. - :type logging: - :class:`~..common.models.Logging` - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: - :class:`~..common.models.Metrics` - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: - :class:`~..common.models.Metrics` - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~..common.models.CorsRule`) - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param int timeout: - The timeout parameter is expressed in seconds. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. 
- :type delete_retention_policy: - :class:`~..common.models.DeleteRetentionPolicy` - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: - :class:`~..common.models.StaticWebsite` - ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, - cors, target_version, delete_retention_policy, static_website)) - - self._perform_request(request) - - def get_blob_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The blob :class:`~..common.models.ServiceProperties` with an attached - target_version property. - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def get_blob_properties( - self, container_name, blob_name, snapshot=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - Returns :class:`~azure.storage.blob.models.Blob` - with :class:`~azure.storage.blob.models.BlobProperties` and a metadata dict. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. 
-
- :param int timeout:
- The timeout parameter is expressed in seconds.
- :return: a blob object including properties and metadata.
- :rtype: :class:`~azure.storage.blob.models.Blob`
- '''
- _validate_not_none('container_name', container_name)
- _validate_not_none('blob_name', blob_name)
- request = HTTPRequest()
- request.method = 'HEAD'
- request.host_locations = self._get_host_locations(secondary=True)
- request.path = _get_path(container_name, blob_name)
- request.query = {
- 'snapshot': _to_str(snapshot),
- 'timeout': _int_to_str(timeout),
- }
- request.headers = {
- 'x-ms-lease-id': _to_str(lease_id),
- 'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
- 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
- 'If-Match': _to_str(if_match),
- 'If-None-Match': _to_str(if_none_match),
- }
-
- return self._perform_request(request, _parse_blob, [blob_name, snapshot])
-
- def set_blob_properties(
- self, container_name, blob_name, content_settings=None, lease_id=None,
- if_modified_since=None, if_unmodified_since=None, if_match=None,
- if_none_match=None, timeout=None):
- '''
- Sets system properties on the blob. If one property is set for the
- content_settings, all properties will be overridden.
-
- :param str container_name:
- Name of existing container.
- :param str blob_name:
- Name of existing blob.
- :param ~azure.storage.blob.models.ContentSettings content_settings:
- ContentSettings object used to set blob properties.
- :param str lease_id:
- Required if the blob has an active lease.
- :param datetime if_modified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only
- if the resource has been modified since the specified time.
- :param datetime if_unmodified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only if
- the resource has not been modified since the specified date/time.
- :param str if_match:
- An ETag value, or the wildcard character (*). Specify this header to perform
- the operation only if the resource's ETag matches the value specified.
- :param str if_none_match:
- An ETag value, or the wildcard character (*). Specify this header
- to perform the operation only if the resource's ETag does not match
- the value specified. Specify the wildcard character (*) to perform
- the operation only if the resource does not exist, and fail the
- operation if it does exist.
- :param int timeout:
- The timeout parameter is expressed in seconds. 
- :return: ETag and last modified properties for the updated Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id) - } - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - return self._perform_request(request, _parse_base_properties) - - def exists(self, container_name, blob_name=None, snapshot=None, timeout=None): - ''' - Returns a boolean indicating whether the container exists (if blob_name - is None), or otherwise a boolean indicating whether the blob exists. - - :param str container_name: - Name of a container. - :param str blob_name: - Name of a blob. If None, the container will be checked for existence. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the snapshot. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A boolean indicating whether the resource exists. - :rtype: bool - ''' - _validate_not_none('container_name', container_name) - try: - # make head request to see if container/blob/snapshot exists - request = HTTPRequest() - request.method = 'GET' if blob_name is None else 'HEAD' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - 'restype': 'container' if blob_name is None else None, - } - - expected_errors = [_CONTAINER_NOT_FOUND_ERROR_CODE] if blob_name is None \ - else [_CONTAINER_NOT_FOUND_ERROR_CODE, _BLOB_NOT_FOUND_ERROR_CODE] - self._perform_request(request, expected_errors=expected_errors) - - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def _get_blob( - self, container_name, blob_name, snapshot=None, start_range=None, - end_range=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, - _context=None): - ''' - Downloads a blob's content, metadata, and properties. You can also - call this API to read a snapshot. You can specify a range if you don't - need to download the blob in its entirety. If no range is specified, - the full blob will be downloaded. - - See get_blob_to_* for high level functions that handle the download - of large blobs with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. 
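The exists/set_blob_properties pair can be sketched as follows; the ContentSettings import path is an assumption, and note that a single ContentSettings value replaces every content setting on the blob::

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService          # assumed path
    from azure.multiapi.storage.v2018_11_09.blob.models import ContentSettings    # assumed path

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    if svc.exists('mycontainer', 'report.csv'):
        # One ContentSettings object overrides all existing content settings.
        resource = svc.set_blob_properties(
            'mycontainer', 'report.csv',
            content_settings=ContentSettings(content_type='text/csv'))
        print(resource.etag, resource.last_modified)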
- Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - When this is set to True and specified together with the Range header, - the service returns the MD5 hash for the range, as long as the range - is less than or equal to 4 MB in size. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A Blob with content, properties, and metadata. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_decryption_required(self.require_encryption, - self.key_encryption_key, - self.key_resolver_function) - - start_offset, end_offset = 0, 0 - if self.key_encryption_key is not None or self.key_resolver_function is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. 
- if start_range > 0: - start_offset += 16 - start_range -= 16 - - if end_range is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - check_content_md5=validate_content) - - return self._perform_request(request, _parse_blob, - [blob_name, snapshot, validate_content, self.require_encryption, - self.key_encryption_key, self.key_resolver_function, - start_offset, end_offset], - operation_context=_context) - - def get_blob_to_path( - self, container_name, blob_name, file_path, open_mode='wb', - snapshot=None, start_range=None, end_range=None, - validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Downloads a blob to a file path, with automatic chunking and progress - notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with - properties and metadata. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str file_path: - Path of file to write out to. - :param str open_mode: - Mode to use when opening the file. Note that specifying append only - open_mode prevents parallel download. So, max_connections must be set - to 1 if this open_mode is used. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. 
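The 16-byte alignment performed above for encrypted blobs is easier to follow as standalone arithmetic. The helper below is a hypothetical restatement of that logic, not part of the library::

    def align_encrypted_range(start_range, end_range):
        """Widen a requested byte range to 16-byte AES block boundaries and,
        when the range does not start at offset 0, pull in the preceding
        16-byte IV block as well (mirrors the _get_blob logic above)."""
        start_offset, end_offset = 0, 0
        if start_range is not None:
            start_offset = start_range % 16        # distance back to the block boundary
            start_range -= start_offset
            if start_range > 0:                    # not the first block: also fetch the IV block
                start_offset += 16
                start_range -= 16
        if end_range is not None:
            end_offset = 15 - (end_range % 16)     # distance forward to the block boundary
            end_range += end_offset
        return start_range, end_range, start_offset, end_offset

    # Requesting bytes 100-200 of an encrypted blob actually downloads bytes 80-207.
    print(align_encrypted_range(100, 200))  # (80, 207, 20, 7)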
- :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. 
- :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - _validate_not_none('open_mode', open_mode) - - if max_connections > 1 and 'a' in open_mode: - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - with open(file_path, open_mode) as stream: - blob = self.get_blob_to_stream( - container_name, - blob_name, - stream, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - return blob - - def get_blob_to_stream( - self, container_name, blob_name, stream, snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - - ''' - Downloads a blob to a stream, with automatic chunking and progress - notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with - properties and metadata. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param io.IOBase stream: - Opened stream to write to. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. 
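A hedged sketch of a chunked download to disk with a progress callback, using the placeholder client from the earlier sketches::

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # versioned path is an assumption

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    def report_progress(current, total):
        # current/total are byte counts; total is the blob size when known.
        print('downloaded {0} of {1} bytes'.format(current, total))

    # Chunked, parallel download straight to disk; max_connections=1 would force a single GET.
    blob = svc.get_blob_to_path('mycontainer', 'big.bin', '/tmp/big.bin',
                                max_connections=4,
                                progress_callback=report_progress)
    print(blob.properties.content_length)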
This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - - if end_range is not None: - _validate_not_none("start_range", start_range) - - # the stream must be seekable if parallel download is required - if max_connections > 1: - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - else: - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. 
- first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE - - initial_request_start = start_range if start_range is not None else 0 - - if end_range is not None and end_range - start_range < first_get_size: - initial_request_end = end_range - else: - initial_request_end = initial_request_start + first_get_size - 1 - - # Send a context object to make sure we always retry to the initial location - operation_context = _OperationContext(location_lock=True) - try: - blob = self._get_blob(container_name, - blob_name, - snapshot, - start_range=initial_request_start, - end_range=initial_request_end, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - _context=operation_context) - - # Parse the total blob size and adjust the download size if ranges - # were specified - blob_size = _parse_length_from_content_range(blob.properties.content_range) - if end_range is not None: - # Use the end_range unless it is over the end of the blob - download_size = min(blob_size, end_range - start_range + 1) - elif start_range is not None: - download_size = blob_size - start_range - else: - download_size = blob_size - except AzureHttpError as ex: - if start_range is None and ex.status_code == 416: - # Get range will fail on an empty blob. If the user did not - # request a range, do a regular get request in order to get - # any properties. - blob = self._get_blob(container_name, - blob_name, - snapshot, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - _context=operation_context) - - # Set the download size to empty - download_size = 0 - else: - raise ex - - # Mark the first progress chunk. If the blob is small or this is a single - # shot download, this is the only call - if progress_callback: - progress_callback(blob.properties.content_length, download_size) - - # Write the content to the user stream - # Clear blob content since output has been written to user stream - if blob.content is not None: - stream.write(blob.content) - blob.content = None - - # If the blob is small, the download is complete at this point. - # If blob size is large, download the rest of the blob in chunks. - if blob.properties.content_length != download_size: - # Lock on the etag. 
This can be overriden by the user by specifying '*' - if_match = if_match if if_match is not None else blob.properties.etag - - end_blob = blob_size - if end_range is not None: - # Use the end_range unless it is over the end of the blob - end_blob = min(blob_size, end_range + 1) - - _download_blob_chunks( - self, - container_name, - blob_name, - snapshot, - download_size, - self.MAX_CHUNK_GET_SIZE, - first_get_size, - initial_request_end + 1, # start where the first download ended - end_blob, - stream, - max_connections, - progress_callback, - validate_content, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout, - operation_context - ) - - # Set the content length to the download size instead of the size of - # the last range - blob.properties.content_length = download_size - - # Overwrite the content range to the user requested range - blob.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, blob_size) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - blob.properties.content_md5 = None - - return blob - - def get_blob_to_bytes( - self, container_name, blob_name, snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Downloads a blob as an array of bytes, with automatic chunking and - progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with - properties, metadata, and content. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. 
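Ranged download into a caller-supplied stream might look like the sketch below; as noted above, the stream must be seekable when max_connections is greater than 1::

    import io
    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # versioned path is an assumption

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    buf = io.BytesIO()  # seekable, so parallel download is allowed
    blob = svc.get_blob_to_stream('mycontainer', 'big.bin', buf,
                                  start_range=0,
                                  end_range=1024 * 1024 - 1,  # first 1 MiB; range is inclusive
                                  max_connections=2)
    print(len(buf.getvalue()), blob.properties.content_range)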
- :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - - stream = BytesIO() - blob = self.get_blob_to_stream( - container_name, - blob_name, - stream, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - blob.content = stream.getvalue() - return blob - - def get_blob_to_text( - self, container_name, blob_name, encoding='utf-8', snapshot=None, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Downloads a blob as unicode text, with automatic chunking and progress - notifications. 
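The in-memory variant, again with placeholder names::

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # versioned path is an assumption

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    # Convenience wrapper over get_blob_to_stream: the bytes end up on blob.content.
    blob = svc.get_blob_to_bytes('mycontainer', 'picture.png', validate_content=True)
    with open('picture.png', 'wb') as f:
        f.write(blob.content)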
Returns an instance of :class:`~azure.storage.blob.models.Blob` with - properties, metadata, and content. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str encoding: - Python encoding to use when decoding the blob data. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve. - :param int start_range: - Start of byte range to use for downloading a section of the blob. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param int end_range: - End of byte range to use for downloading a section of the blob. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of blob. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the blob. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the blob if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be useful if many blobs are - expected to be empty as an extra request is required for empty blobs - if max_connections is greater than 1. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: A Blob with properties and metadata. If max_connections is greater - than 1, the content_md5 (if set on the blob) will not be returned. If you - require this value, either use get_blob_properties or set max_connections - to 1. - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('encoding', encoding) - - blob = self.get_blob_to_bytes(container_name, - blob_name, - snapshot, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - blob.content = blob.content.decode(encoding) - return blob - - def get_blob_metadata( - self, container_name, blob_name, snapshot=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Returns all user-defined metadata for the specified blob or snapshot. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque value that, - when present, specifies the blob snapshot to retrieve. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - A dictionary representing the blob metadata name, value pairs. 
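And the text variant, which downloads the bytes and decodes them with the requested encoding::

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # versioned path is an assumption

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    # blob.content is a decoded str rather than bytes.
    blob = svc.get_blob_to_text('mycontainer', 'notes.txt', encoding='utf-8')
    print(blob.content[:200])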
- :rtype: dict(str, str) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'snapshot': _to_str(snapshot), - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_metadata) - - def set_blob_metadata(self, container_name, blob_name, - metadata=None, lease_id=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Sets user-defined metadata for the specified blob as one or more - name-value pairs. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. 
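A sketch of reading blob metadata as a plain dictionary::

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # versioned path is an assumption

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    # User-defined name/value pairs for the blob (or, with snapshot=, a snapshot of it).
    metadata = svc.get_blob_metadata('mycontainer', 'report.csv')
    for name, value in metadata.items():
        print(name, '=', value)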
- :return: ETag and last modified properties for the updated Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_base_properties) - - def _lease_blob_impl(self, container_name, blob_name, - lease_action, lease_id, - lease_duration, lease_break_period, - proposed_lease_id, if_modified_since, - if_unmodified_since, if_match, if_none_match, timeout=None): - ''' - Establishes and manages a lease on a blob for write and delete operations. - The Lease Blob operation can be called in one of five modes: - Acquire, to request a new lease. - Renew, to renew an existing lease. - Change, to change the ID of an existing lease. - Release, to free the lease if it is no longer needed so that another - client may immediately acquire a lease against the blob. - Break, to end the lease but ensure that another client cannot acquire - a new lease until the current lease period has expired. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_action: - Possible _LeaseActions acquire|renew|release|break|change - :param str lease_id: - Required if the blob has an active lease. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param str proposed_lease_id: - Optional for acquire, required for change. Proposed lease ID, in a - GUID string format. The Blob service returns 400 (Invalid request) - if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
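Setting metadata replaces the blob's entire metadata set, as the docstring above notes; a minimal sketch::

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # versioned path is an assumption

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    # Each call replaces all existing metadata; calling with metadata=None clears it.
    resource = svc.set_blob_metadata('mycontainer', 'report.csv',
                                     metadata={'department': 'finance', 'reviewed': 'true'})
    print(resource.etag)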
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: - Response headers returned from the service call. - :rtype: dict(str, str) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('lease_action', lease_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'lease', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-lease-action': _to_str(lease_action), - 'x-ms-lease-duration': _to_str(lease_duration), - 'x-ms-lease-break-period': _to_str(lease_break_period), - 'x-ms-proposed-lease-id': _to_str(proposed_lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_lease) - - def acquire_blob_lease(self, container_name, blob_name, - lease_duration=-1, - proposed_lease_id=None, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Requests a new lease. If the blob does not have an active lease, the Blob - service creates a lease on the blob and returns a new lease ID. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the newly created lease. - :return: str - ''' - _validate_not_none('lease_duration', lease_duration) - - if lease_duration != -1 and \ - (lease_duration < 15 or lease_duration > 60): - raise ValueError(_ERROR_INVALID_LEASE_DURATION) - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Acquire, - None, # lease_id - lease_duration, - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['id'] - - def renew_blob_lease(self, container_name, blob_name, - lease_id, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Renews the lease. The lease can be renewed if the lease ID specified on - the request matches that associated with the blob. Note that the lease may - be renewed even if it has expired as long as the blob has not been modified - or leased again since the expiration of that lease. When you renew a lease, - the lease duration clock resets. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the lease ID of the renewed lease. 
- :return: str - ''' - _validate_not_none('lease_id', lease_id) - - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Renew, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['id'] - - def release_blob_lease(self, container_name, blob_name, - lease_id, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Releases the lease. The lease may be released if the lease ID specified on the - request matches that associated with the blob. Releasing the lease allows another - client to immediately acquire the lease for the blob as soon as the release is complete. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Lease ID for active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('lease_id', lease_id) - - self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Release, - lease_id, - None, # lease_duration - None, # lease_break_period - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - def break_blob_lease(self, container_name, blob_name, - lease_break_period=None, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Breaks the lease, if the blob has an active lease. Once a lease is broken, - it cannot be renewed. Any authorized request can break the lease; the request - is not required to specify a matching lease ID. When a lease is broken, - the lease break period is allowed to elapse, during which time no lease operation - except break and release can be performed on the blob. When a lease is successfully - broken, the response indicates the interval in seconds until a new lease can be acquired. - - A lease that has been broken can also be released, in which case another client may - immediately acquire the lease on the blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. 
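A sketch of the lease lifecycle (acquire, use, renew, release) with the same placeholder client; finite leases must be between 15 and 60 seconds::

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # versioned path is an assumption

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    # Acquire a 30-second lease, use it for a write, renew to reset the clock, then release.
    lease_id = svc.acquire_blob_lease('mycontainer', 'report.csv', lease_duration=30)
    try:
        svc.set_blob_metadata('mycontainer', 'report.csv',
                              metadata={'locked-by': 'nightly-job'}, lease_id=lease_id)
        svc.renew_blob_lease('mycontainer', 'report.csv', lease_id)
    finally:
        svc.release_blob_lease('mycontainer', 'report.csv', lease_id)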
- :param int lease_break_period: - For a break operation, this is the proposed duration of - seconds that the lease should continue before it is broken, between - 0 and 60 seconds. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining - on the lease is used. A new lease will not be available before the - break period has expired, but the lease may be held for longer than - the break period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :return: int - ''' - if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60): - raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD) - - lease = self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Break, - None, # lease_id - None, # lease_duration - lease_break_period, - None, # proposed_lease_id - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - return lease['time'] - - def change_blob_lease(self, container_name, blob_name, - lease_id, - proposed_lease_id, - if_modified_since=None, - if_unmodified_since=None, - if_match=None, - if_none_match=None, timeout=None): - ''' - Changes the lease ID of an active lease. A change must include the current - lease ID and a new lease ID. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str lease_id: - Required if the blob has an active lease. - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
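Breaking a lease needs no lease ID; the call returns the approximate number of seconds before a new lease can be acquired::

    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService  # versioned path is an assumption

    svc = BlockBlobService(account_name='myaccount', account_key='<storage-key>')

    # Any authorized caller may break the lease; the break period must be 0-60 seconds.
    seconds_left = svc.break_blob_lease('mycontainer', 'report.csv', lease_break_period=10)
    print('lease can be re-acquired in', seconds_left, 'seconds')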
- :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - self._lease_blob_impl(container_name, - blob_name, - _LeaseActions.Change, - lease_id, - None, # lease_duration - None, # lease_break_period - proposed_lease_id, - if_modified_since, - if_unmodified_since, - if_match, - if_none_match, - timeout) - - def snapshot_blob(self, container_name, blob_name, - metadata=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, - if_none_match=None, lease_id=None, timeout=None): - ''' - Creates a read-only snapshot of a blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param metadata: - Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the - base blob metadata to the snapshot. If one or more name-value pairs - are specified, the snapshot is created with the specified metadata, - and metadata is not copied from the base blob. - :type metadata: dict(str, str) - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: snapshot properties - :rtype: :class:`~azure.storage.blob.models.Blob` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'snapshot', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - 'x-ms-lease-id': _to_str(lease_id) - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_snapshot_blob, [blob_name]) - - def copy_blob(self, container_name, blob_name, copy_source, - metadata=None, - source_if_modified_since=None, - source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=None, - destination_if_unmodified_since=None, - destination_if_match=None, - destination_if_none_match=None, - destination_lease_id=None, - source_lease_id=None, timeout=None): - ''' - Copies a blob asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call get_blob_properties on the destination - blob to check the status of the copy operation. The final blob will be - committed when the copy completes. - - :param str container_name: - Name of the destination container. The container must exist. - :param str blob_name: - Name of the destination blob. If the destination blob exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :param datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :param ETag source_if_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the source blob only if its ETag matches the value - specified. If the ETag values do not match, the Blob service returns - status code 412 (Precondition Failed). This header cannot be specified - if the source is an Azure File. - :param ETag source_if_none_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the blob only if its ETag does not match the value - specified. If the values are identical, the Blob service returns status - code 412 (Precondition Failed). This header cannot be specified if the - source is an Azure File. - :param datetime destination_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :param datetime destination_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :param ETag destination_if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - matches the ETag value for an existing destination blob. If the ETag for - the destination blob does not match the ETag specified for If-Match, the - Blob service returns status code 412 (Precondition Failed). 
- :param ETag destination_if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - does not match the ETag value for the destination blob. Specify the wildcard - character (*) to perform the operation only if the destination blob does not - exist. If the specified condition isn't met, the Blob service returns status - code 412 (Precondition Failed). - :param str destination_lease_id: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :param str source_lease_id: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.blob.models.CopyProperties` - ''' - return self._copy_blob(container_name, blob_name, copy_source, - metadata, - None, - source_if_modified_since, source_if_unmodified_since, - source_if_match, source_if_none_match, - destination_if_modified_since, - destination_if_unmodified_since, - destination_if_match, - destination_if_none_match, - destination_lease_id, - source_lease_id, timeout, - False) - - def _copy_blob(self, container_name, blob_name, copy_source, - metadata=None, - premium_page_blob_tier=None, - source_if_modified_since=None, - source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=None, - destination_if_unmodified_since=None, - destination_if_match=None, - destination_if_none_match=None, - destination_lease_id=None, - source_lease_id=None, timeout=None, - incremental_copy=False): - ''' - See copy_blob for more details. This helper method - allows for standard copies as well as incremental copies which are only supported for page blobs. - :param bool incremental_copy: - The timeout parameter is expressed in seconds. 
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('copy_source', copy_source) - - if copy_source.startswith('/'): - # Backwards compatibility for earlier versions of the SDK where - # the copy source can be in the following formats: - # - Blob in named container: - # /accountName/containerName/blobName - # - Snapshot in named container: - # /accountName/containerName/blobName?snapshot= - # - Blob in root container: - # /accountName/blobName - # - Snapshot in root container: - # /accountName/blobName?snapshot= - account, _, source = \ - copy_source.partition('/')[2].partition('/') - copy_source = self.protocol + '://' + \ - self.primary_endpoint + '/' + source - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - - if incremental_copy: - request.query = { - 'comp': 'incrementalcopy', - 'timeout': _int_to_str(timeout), - } - else: - request.query = {'timeout': _int_to_str(timeout)} - - request.headers = { - 'x-ms-copy-source': _to_str(copy_source), - 'x-ms-source-if-modified-since': _to_str(source_if_modified_since), - 'x-ms-source-if-unmodified-since': _to_str(source_if_unmodified_since), - 'x-ms-source-if-match': _to_str(source_if_match), - 'x-ms-source-if-none-match': _to_str(source_if_none_match), - 'If-Modified-Since': _datetime_to_utc_string(destination_if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(destination_if_unmodified_since), - 'If-Match': _to_str(destination_if_match), - 'If-None-Match': _to_str(destination_if_none_match), - 'x-ms-lease-id': _to_str(destination_lease_id), - 'x-ms-source-lease-id': _to_str(source_lease_id), - 'x-ms-access-tier': _to_str(premium_page_blob_tier) - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_properties, [BlobProperties]).copy - - def abort_copy_blob(self, container_name, blob_name, copy_id, - lease_id=None, timeout=None): - ''' - Aborts a pending copy_blob operation, and leaves a destination blob - with zero length and full metadata. - - :param str container_name: - Name of destination container. - :param str blob_name: - Name of destination blob. - :param str copy_id: - Copy identifier provided in the copy.id of the original - copy_blob operation. - :param str lease_id: - Required if the destination blob has an active infinite lease. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('copy_id', copy_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'copy', - 'copyid': _to_str(copy_id), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-copy-action': 'abort', - } - - self._perform_request(request) - - def delete_blob(self, container_name, blob_name, snapshot=None, - lease_id=None, delete_snapshots=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Marks the specified blob or snapshot for deletion. - The blob is later deleted during garbage collection. - - Note that in order to delete a blob, you must delete all of its - snapshots. 
You can delete both at the same time with the Delete - Blob operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through List Blobs API specifying include=Include.Deleted option. - Soft-deleted blob or snapshot can be restored using Undelete API. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to delete. - :param str lease_id: - Required if the blob has an active lease. - :param ~azure.storage.blob.models.DeleteSnapshot delete_snapshots: - Required if the blob has associated snapshots. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-delete-snapshots': _to_str(delete_snapshots), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - request.query = { - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout) - } - - self._perform_request(request) - - def undelete_blob(self, container_name, blob_name, timeout=None): - ''' - The undelete Blob operation restores the contents and metadata of soft deleted blob or snapshot. - Attempting to undelete a blob or snapshot that is not soft deleted will succeed without any changes. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int timeout: - The timeout parameter is expressed in seconds. 
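For reference, a minimal sketch of the soft-delete round trip that the delete_blob/undelete_blob docstrings above describe, using the removed v2018_03_28 package; the client class, credentials, container, and blob names below are illustrative placeholders, not values taken from this change:

    from azure.multiapi.storage.v2018_03_28.blob.blockblobservice import BlockBlobService

    # Placeholder credentials; BlockBlobService inherits the base blob
    # operations (delete_blob, undelete_blob) documented above.
    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    # Mark the blob for deletion; with a delete retention policy enabled it is
    # soft deleted and retained for the configured number of days.
    service.delete_blob('mycontainer', 'myblob')

    # Restore the soft-deleted blob and its metadata.
    service.undelete_blob('mycontainer', 'myblob')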
- ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'undelete', - 'timeout': _int_to_str(timeout) - } - - self._perform_request(request) diff --git a/azure/multiapi/storage/v2018_03_28/blob/blockblobservice.py b/azure/multiapi/storage/v2018_03_28/blob/blockblobservice.py deleted file mode 100644 index c9e332c..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/blockblobservice.py +++ /dev/null @@ -1,1062 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from io import ( - BytesIO -) -from os import ( - path, -) - -from ..common._common_conversion import ( - _encode_base64, - _to_str, - _int_to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_required, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, - _ERROR_VALUE_SHOULD_BE_STREAM -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_request_body, - _get_data_bytes_only, - _get_data_bytes_or_stream_only, - _add_metadata_headers, -) -from ..common._serialization import ( - _len_plus -) -from ._deserialization import ( - _convert_xml_to_block_list, - _parse_base_properties, -) -from ._encryption import ( - _encrypt_blob, - _generate_blob_encryption_data, -) -from ._serialization import ( - _convert_block_list_to_xml, - _get_path, -) -from ._upload_chunking import ( - _BlockBlobChunkUploader, - _upload_blob_chunks, - _upload_blob_substream_blocks, -) -from .baseblobservice import BaseBlobService -from .models import ( - _BlobTypes, -) - - -class BlockBlobService(BaseBlobService): - ''' - Block blobs let you upload large blobs efficiently. Block blobs are comprised - of blocks, each of which is identified by a block ID. You create or modify a - block blob by writing a set of blocks and committing them by their block IDs. - Each block can be a different size, up to a maximum of 100 MB, and a block blob - can include up to 50,000 blocks. The maximum size of a block blob is therefore - approximately 4.75 TB (100 MB X 50,000 blocks). If you are writing a block - blob that is no more than 64 MB in size, you can upload it in its entirety with - a single write operation; see create_blob_from_bytes. - - :ivar int MAX_SINGLE_PUT_SIZE: - The largest size upload supported in a single put call. This is used by - the create_blob_from_* methods if the content length is known and is less - than this value. - :ivar int MAX_BLOCK_SIZE: - The size of the blocks put by create_blob_from_* methods if the content - length is unknown or is larger than MAX_SINGLE_PUT_SIZE. Smaller blocks - may be put. The maximum block size the service supports is 100MB. - :ivar int MIN_LARGE_BLOCK_UPLOAD_THRESHOLD: - The minimum block size at which the the memory-optimized, block upload - algorithm is considered. 
This algorithm is only applicable to the create_blob_from_file and - create_blob_from_stream methods and will prevent the full buffering of blocks. - In addition to the block size, ContentMD5 validation and Encryption must be disabled as - these options require the blocks to be buffered. - ''' - - MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024 - MAX_BLOCK_SIZE = 4 * 1024 * 1024 - MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 4 * 1024 * 1024 + 1 - - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, - protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, - request_session=None, connection_string=None, socket_timeout=None, token_credential=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - :param token_credential: - A token credential used to authenticate HTTPS requests. The token value - should be updated before its expiration. - :type `~..common.TokenCredential` - ''' - self.blob_type = _BlobTypes.BlockBlob - super(BlockBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout, token_credential) - - def put_block(self, container_name, blob_name, block, block_id, - validate_content=False, lease_id=None, timeout=None): - ''' - Creates a new block to be committed as part of a blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob. - :param block: Content of the block. - :type block: io.IOBase or bytes - Content of the block. - :param str block_id: - A valid Base64 string value that identifies the block. Prior to - encoding, the string must be less than or equal to 64 bytes in size. 
- For a given blob, the length of the value specified for the blockid - parameter must be the same size for each block. Note that the Base64 - string must be URL-encoded. - :param bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - self._put_block( - container_name, - blob_name, - block, - block_id, - validate_content=validate_content, - lease_id=lease_id, - timeout=timeout - ) - - def put_block_list( - self, container_name, blob_name, block_list, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Writes a blob by specifying the list of block IDs that make up the blob. - In order to be written as part of a blob, a block must have been - successfully written to the server in a prior Put Block operation. - - You can call Put Block List to update a blob by uploading only those - blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from - the committed block list or from the uncommitted block list, or to commit - the most recently uploaded version of the block, whichever list it may - belong to. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param block_list: - A list of :class:`~azure.storeage.blob.models.BlobBlock` containing the block ids and block state. - :type block_list: list(:class:`~azure.storage.blob.models.BlobBlock`) - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash of the block list content. The storage - service checks the hash of the block list content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this check is associated with - the block list content, and not with the content of the blob itself. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._put_block_list( - container_name, - blob_name, - block_list, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def get_block_list(self, container_name, blob_name, snapshot=None, - block_list_type=None, lease_id=None, timeout=None): - ''' - Retrieves the list of blocks that have been uploaded as part of a - block blob. There are two block lists maintained for a blob: - Committed Block List: - The list of blocks that have been successfully committed to a - given blob with Put Block List. - Uncommitted Block List: - The list of blocks that have been uploaded for a blob using - Put Block, but that have not yet been committed. These blocks - are stored in Azure in association with a blob, but do not yet - form part of the blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - Datetime to determine the time to retrieve the blocks. - :param str block_list_type: - Specifies whether to return the list of committed blocks, the list - of uncommitted blocks, or both lists together. Valid values are: - committed, uncommitted, or all. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: list committed and/or uncommitted blocks for Block Blob - :rtype: :class:`~azure.storage.blob.models.BlobBlockList` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'blocklist', - 'snapshot': _to_str(snapshot), - 'blocklisttype': _to_str(block_list_type), - 'timeout': _int_to_str(timeout), - } - request.headers = {'x-ms-lease-id': _to_str(lease_id)} - - return self._perform_request(request, _convert_xml_to_block_list) - - def put_block_from_url(self, container_name, blob_name, copy_source_url, source_range_start, source_range_end, - block_id, source_content_md5=None, lease_id=None, timeout=None): - """ - Creates a new block to be committed as part of a blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob. - :param str copy_source_url: - The URL of the source data. 
It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_range_start: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - :param int source_range_end: - This indicates the end of the range of bytes(inclusive) that has to be taken from the copy source. - :param str block_id: - A valid Base64 string value that identifies the block. Prior to - encoding, the string must be less than or equal to 64 bytes in size. - For a given blob, the length of the value specified for the blockid - parameter must be the same size for each block. Note that the Base64 - string must be URL-encoded. - :param str source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :param str lease_id: - Required if the blob has an active lease. - :param int timeout: - The timeout parameter is expressed in seconds. - """ - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('copy_source_url', copy_source_url) - _validate_not_none('source_range_start', source_range_start) - _validate_not_none('source_range_end', source_range_end) - _validate_not_none('block_id', block_id) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'block', - 'blockid': _encode_base64(_to_str(block_id)), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-copy-source': copy_source_url, - 'x-ms-source-range': 'bytes=' + _to_str(source_range_start) + '-' + _to_str(source_range_end), - 'x-ms-source-content-md5': source_content_md5, - } - - self._perform_request(request) - - # ----Convenience APIs----------------------------------------------------- - - def create_blob_from_path( - self, container_name, blob_name, file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - Creates a new blob from a file path, or updates the content of an - existing blob, with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: ETag and last modified properties for the Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - return self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - def create_blob_from_stream( - self, container_name, blob_name, stream, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None, use_byte_buffer=False): - ''' - Creates a new blob from a file/stream, or updates the content of - an existing blob, with automatic chunking and progress - notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param io.IOBase stream: - Opened file/stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. 
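A minimal sketch of the create_blob_from_path convenience API documented above, assuming the removed v2018_03_28 layout; account, container, blob, and file names are placeholders, and the callback follows the function(current, total) contract described for progress_callback:

    from azure.multiapi.storage.v2018_03_28.blob.blockblobservice import BlockBlobService

    def report_progress(current, total):
        # total may be None when the blob size is unknown, per the docstring.
        print('{0} of {1} bytes uploaded'.format(current, total))

    # Placeholder credentials and names.
    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    # Chunked upload from a local file, with parallel connections used once the
    # blob exceeds the single-put threshold.
    service.create_blob_from_path(
        'mycontainer',
        'myblob',
        '/path/to/local/file.bin',
        progress_callback=report_progress,
        max_connections=4,
    )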
- :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. Note that parallel upload requires the stream to be seekable. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param bool use_byte_buffer: - If True, this will force usage of the original full block buffering upload path. - By default, this value is False and will employ a memory-efficient, - streaming upload algorithm under the following conditions: - The provided stream is seekable, 'require_encryption' is False, and - MAX_BLOCK_SIZE >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD. - One should consider the drawbacks of using this approach. In order to achieve - memory-efficiency, a IOBase stream or file-like object is segmented into logical blocks - using a SubStream wrapper. In order to read the correct data, each SubStream must acquire - a lock so that it can safely seek to the right position on the shared, underlying stream. 
- If max_connections > 1, the concurrency will result in a considerable amount of seeking on - the underlying stream. For the most common inputs such as a file-like stream object, seeking - is an inexpensive operation and this is not much of a concern. However, for other variants of streams - this may not be the case. The trade-off for memory-efficiency must be weighed against the cost of seeking - with your input stream. - The SubStream class will attempt to buffer up to 4 MB internally to reduce the amount of - seek and read calls to the underlying stream. This is particularly beneficial when uploading larger blocks. - :return: ETag and last modified properties for the Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - # Adjust count to include padding if we are expected to encrypt. - adjusted_count = count - if (self.key_encryption_key is not None) and (adjusted_count is not None): - adjusted_count += (16 - (count % 16)) - - # Do single put if the size is smaller than MAX_SINGLE_PUT_SIZE - if adjusted_count is not None and (adjusted_count < self.MAX_SINGLE_PUT_SIZE): - if progress_callback: - progress_callback(0, count) - - data = stream.read(count) - resp = self._put_blob( - container_name=container_name, - blob_name=blob_name, - blob=data, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - if progress_callback: - progress_callback(count, count) - - return resp - else: # Size is larger than MAX_SINGLE_PUT_SIZE, must upload with multiple put_block calls - cek, iv, encryption_data = None, None, None - - use_original_upload_path = use_byte_buffer or validate_content or self.require_encryption or \ - self.MAX_BLOCK_SIZE < self.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if self.key_encryption_key: - cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) - - block_ids = _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_BlockBlobChunkUploader, - timeout=timeout, - content_encryption_key=cek, - initialization_vector=iv - ) - else: - block_ids = _upload_blob_substream_blocks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_BLOCK_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_BlockBlobChunkUploader, - timeout=timeout, - ) - - return self._put_block_list( - container_name=container_name, - blob_name=blob_name, - block_list=block_ids, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - if_modified_since=if_modified_since, - 
if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - encryption_data=encryption_data - ) - - def create_blob_from_bytes( - self, container_name, blob_name, blob, index=0, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Creates a new blob from an array of bytes, or updates the content - of an existing blob, with automatic chunking and progress - notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
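A minimal sketch of the create_blob_from_bytes API whose parameters are documented above, again assuming the removed v2018_03_28 package; the credentials and names are placeholders:

    from azure.multiapi.storage.v2018_03_28.blob.blockblobservice import BlockBlobService

    # Placeholder credentials and names.
    service = BlockBlobService(account_name='myaccount', account_key='<account-key>')

    data = b'hello, world'

    # Payloads up to MAX_SINGLE_PUT_SIZE go up in a single Put Blob call; larger
    # payloads are split into blocks and committed with a block list internally.
    service.create_blob_from_bytes('mycontainer', 'myblob', data)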
- :return: ETag and last modified properties for the Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_not_none('index', index) - _validate_type_bytes('blob', blob) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - return self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - progress_callback=progress_callback, - max_connections=max_connections, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - use_byte_buffer=True - ) - - def create_blob_from_text( - self, container_name, blob_name, text, encoding='utf-8', - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None): - ''' - Creates a new blob from str/unicode, or updates the content of an - existing blob, with automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str text: - Text to upload to the blob. - :param str encoding: - Python encoding to use to convert the text to bytes. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :return: ETag and last modified properties for the Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('text', text) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - return self.create_blob_from_bytes( - container_name=container_name, - blob_name=blob_name, - blob=text, - index=0, - count=len(text), - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout) - - def set_standard_blob_tier( - self, container_name, blob_name, standard_blob_tier, timeout=None): - ''' - Sets the block blob tiers on the blob. This API is only supported for block blobs on standard storage accounts. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to update. - :param StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('standard_blob_tier', standard_blob_tier) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'tier', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-access-tier': _to_str(standard_blob_tier) - } - - self._perform_request(request) - - # -----Helper methods------------------------------------ - def _put_blob(self, container_name, blob_name, blob, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None): - ''' - Creates a blob or updates an existing blob. - - See create_blob_from_* for high level - functions that handle the creation and upload of large blobs with - automatic chunking and progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. 
- :param bytes blob: - Content of blob as bytes (size < 64MB). For larger size, you - must call put_block and put_block_list to set content of blob. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param metadata: - Name-value pairs associated with the blob as metadata. - :param bool validate_content: - If true, calculates an MD5 hash of the blob content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: ETag and last modified properties for the new Block Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - blob = _get_data_bytes_only('blob', blob) - if self.key_encryption_key: - encryption_data, blob = _encrypt_blob(blob, self.key_encryption_key) - request.headers['x-ms-meta-encryptiondata'] = encryption_data - request.body = blob - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_base_properties) - - def _put_block(self, container_name, blob_name, block, block_id, - validate_content=False, lease_id=None, timeout=None): - ''' - See put_block for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - ''' - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block', block) - _validate_not_none('block_id', block_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'block', - 'blockid': _encode_base64(_to_str(block_id)), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id) - } - request.body = _get_data_bytes_or_stream_only('block', block) - if hasattr(request.body, 'read'): - if _len_plus(request.body) is None: - try: - data = b'' - for chunk in iter(lambda: request.body.read(4096), b""): - data += chunk - request.body = data - except AttributeError: - raise ValueError(_ERROR_VALUE_SHOULD_BE_STREAM.format('request.body')) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - self._perform_request(request) - - def _put_block_list( - self, container_name, blob_name, block_list, content_settings=None, - metadata=None, validate_content=False, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, - timeout=None, encryption_data=None): - ''' - See put_block_list for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - :param str encryption_data: - A JSON formatted string containing the encryption metadata generated for this - blob if it was encrypted all at once upon upload. 
This should only be passed - in by internal methods. - ''' - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('block_list', block_list) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'blocklist', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - request.body = _get_request_body( - _convert_block_list_to_xml(block_list)) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - if encryption_data is not None: - request.headers['x-ms-meta-encryptiondata'] = encryption_data - - return self._perform_request(request, _parse_base_properties) diff --git a/azure/multiapi/storage/v2018_03_28/blob/models.py b/azure/multiapi/storage/v2018_03_28/blob/models.py deleted file mode 100644 index f106181..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/models.py +++ /dev/null @@ -1,780 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ..common._common_conversion import _to_str - - -class Container(object): - ''' - Blob container class. - - :ivar str name: - The name of the container. - :ivar metadata: - A dict containing name-value pairs associated with the container as metadata. - This var is set to None unless the include=metadata param was included - for the list containers operation. If this parameter was specified but the - container has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict(str, str) - :ivar ContainerProperties properties: - System properties for the container. - ''' - - def __init__(self, name=None, props=None, metadata=None): - self.name = name - self.properties = props or ContainerProperties() - self.metadata = metadata - - -class ContainerProperties(object): - ''' - Blob container's properties class. - - :ivar datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar LeaseProperties lease: - Stores all the lease information for the container. - :ivar bool has_immutability_policy: - Represents whether the container has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the container has a legal hold. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.lease = LeaseProperties() - self.public_access = None - self.has_immutability_policy = None - self.has_legal_hold = None - - -class Blob(object): - ''' - Blob class. - - :ivar str name: - Name of blob. - :ivar str snapshot: - A DateTime value that uniquely identifies the snapshot. 
The value of - this header indicates the snapshot version, and may be used in - subsequent requests to access the snapshot. - :ivar content: - Blob content. - :vartype content: str or bytes - :ivar BlobProperties properties: - Stores all the system properties for the blob. - :ivar metadata: - Name-value pairs associated with the blob as metadata. - :ivar bool deleted: - Specify whether the blob was soft deleted. - In other words, if the blob is being retained by the delete retention policy, - this field would be True. The blob could be undeleted or it will be garbage collected after the specified - time period. - ''' - - def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None, deleted=False): - self.name = name - self.snapshot = snapshot - self.content = content - self.properties = props or BlobProperties() - self.metadata = metadata - self.deleted = deleted - - -class BlobProperties(object): - ''' - Blob Properties - - :ivar str blob_type: - String indicating this blob's type. - :ivar datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int content_length: - The length of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.models.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.models.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.models.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. - :ivar datetime blob_tier_change_time: - Indicates when the access tier was last changed. - :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - :ivar datetime creation_time: - Indicates when the blob was created, in UTC. 
- ''' - - def __init__(self): - self.blob_type = None - self.last_modified = None - self.etag = None - self.content_length = None - self.content_range = None - self.append_blob_committed_block_count = None - self.page_blob_sequence_number = None - self.server_encrypted = None - self.copy = CopyProperties() - self.content_settings = ContentSettings() - self.lease = LeaseProperties() - self.blob_tier = None - self.blob_tier_change_time = None - self.blob_tier_inferred = False - self.deleted_time = None - self.remaining_retention_days = None - self.creation_time = None - - -class ContentSettings(object): - ''' - Used to store the content settings of a blob. - - :ivar str content_type: - The content type specified for the blob. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :ivar str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :ivar str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :ivar str content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. - ''' - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None): - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.cache_control = cache_control - self.content_md5 = content_md5 - - def _to_headers(self): - return { - 'x-ms-blob-cache-control': _to_str(self.cache_control), - 'x-ms-blob-content-type': _to_str(self.content_type), - 'x-ms-blob-content-disposition': _to_str(self.content_disposition), - 'x-ms-blob-content-md5': _to_str(self.content_md5), - 'x-ms-blob-content-encoding': _to_str(self.content_encoding), - 'x-ms-blob-content-language': _to_str(self.content_language), - } - - -class CopyProperties(object): - ''' - Blob Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. This header does not appear if this blob has never - been the destination in a Copy Blob operation, or if this blob has been - modified after a concluded Copy Blob operation using Set Blob Properties, - Put Blob, or Put Block List. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. This header does not - appear if this blob has never been the destination in a Copy Blob operation, or if - this blob has been modified after a concluded Copy Blob operation using - Set Blob Properties, Put Blob, or Put Block List. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. 
- aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - ''' - - def __init__(self): - self.id = None - self.source = None - self.status = None - self.progress = None - self.completion_time = None - self.status_description = None - - -class LeaseProperties(object): - ''' - Blob Lease Properties. - - :ivar str status: - The lease status of the blob. - Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. - Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - ''' - - def __init__(self): - self.status = None - self.state = None - self.duration = None - - -class BlobPrefix(object): - ''' - BlobPrefix objects may potentially be returned in the blob list when - :func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is - used with a delimiter. Prefixes can be thought of as virtual blob directories. - - :ivar str name: The name of the blob prefix. - ''' - - def __init__(self): - self.name = None - - -class BlobBlockState(object): - '''Block blob block types.''' - - Committed = 'Committed' - '''Committed blocks.''' - - Latest = 'Latest' - '''Latest blocks.''' - - Uncommitted = 'Uncommitted' - '''Uncommitted blocks.''' - - -class BlobBlock(object): - ''' - BlockBlob Block class. - - :ivar str id: - Block id. - :ivar str state: - Block state. - Possible values: committed|uncommitted - :ivar int size: - Block size in bytes. - ''' - - def __init__(self, id=None, state=BlobBlockState.Latest): - self.id = id - self.state = state - - def _set_size(self, size): - self.size = size - - -class BlobBlockList(object): - ''' - Blob Block List class. - - :ivar committed_blocks: - List of committed blocks. - :vartype committed_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`) - :ivar uncommitted_blocks: - List of uncommitted blocks. - :vartype uncommitted_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`) - ''' - - def __init__(self): - self.committed_blocks = list() - self.uncommitted_blocks = list() - - -class PageRange(object): - ''' - Page Range for page blob. - - :ivar int start: - Start of page range in bytes. - :ivar int end: - End of page range in bytes. - :ivar bool is_cleared: - Indicates if a page range is cleared or not. Only applicable - for get_page_range_diff API. - ''' - - def __init__(self, start=None, end=None, is_cleared=False): - self.start = start - self.end = end - self.is_cleared = is_cleared - - -class ResourceProperties(object): - ''' - Base response for a resource request. - - :ivar str etag: - Opaque etag value that can be used to check if resource - has been modified. - :ivar datetime last_modified: - Datetime for last time resource was modified.
- ''' - - def __init__(self): - self.last_modified = None - self.etag = None - - -class AppendBlockProperties(ResourceProperties): - ''' - Response for an append block request. - - :ivar int append_offset: - Position to start next append. - :ivar int committed_block_count: - Number of committed append blocks. - ''' - - def __init__(self): - super(AppendBlockProperties, self).__init__() - self.append_offset = None - self.committed_block_count = None - - -class PageBlobProperties(ResourceProperties): - ''' - Response for a page request. - - :ivar int sequence_number: - Identifier for page blobs to help handle concurrent writes. - ''' - - def __init__(self): - super(PageBlobProperties, self).__init__() - self.sequence_number = None - - -class PublicAccess(object): - ''' - Specifies whether data in the container may be accessed publicly and the level of access. - ''' - - OFF = 'off' - ''' - Specifies that there is no public read access for either the container or the blobs within the container. - Clients cannot enumerate the containers within the storage account or the blobs within the container. - ''' - - Blob = 'blob' - ''' - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - ''' - - Container = 'container' - ''' - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - ''' - - -class DeleteSnapshot(object): - ''' - Required if the blob has associated snapshots. Specifies how to handle the snapshots. - ''' - - Include = 'include' - ''' - Delete the base blob and all of its snapshots. - ''' - - Only = 'only' - ''' - Delete only the blob's snapshots and not the blob itself. - ''' - - -class BlockListType(object): - ''' - Specifies whether to return the list of committed blocks, the list of uncommitted - blocks, or both lists together. - ''' - - All = 'all' - '''Both committed and uncommitted blocks.''' - - Committed = 'committed' - '''Committed blocks.''' - - Uncommitted = 'uncommitted' - '''Uncommitted blocks.''' - - -class SequenceNumberAction(object): - '''Sequence number actions.''' - - Increment = 'increment' - ''' - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - ''' - - Max = 'max' - ''' - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - ''' - - Update = 'update' - '''Sets the sequence number to the value included with the request.''' - - -class _LeaseActions(object): - '''Actions for a lease.''' - - Acquire = 'acquire' - '''Acquire the lease.''' - - Break = 'break' - '''Break the lease.''' - - Change = 'change' - '''Change the lease ID.''' - - Release = 'release' - '''Release the lease.''' - - Renew = 'renew' - '''Renew the lease.''' - - -class _BlobTypes(object): - '''Blob type options.''' - - AppendBlob = 'AppendBlob' - '''Append blob type.''' - - BlockBlob = 'BlockBlob' - '''Block blob type.''' - - PageBlob = 'PageBlob' - '''Page blob type.''' - - -class Include(object): - ''' - Specifies the datasets to include in the blob list response.
- - :ivar ~azure.storage.blob.models.Include Include.COPY: - Specifies that metadata related to any current or previous Copy Blob operation - should be included in the response. - :ivar ~azure.storage.blob.models.Include Include.METADATA: - Specifies that metadata be returned in the response. - :ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS: - Specifies that snapshots should be included in the enumeration. - :ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS: - Specifies that blobs for which blocks have been uploaded, but which have not - been committed using Put Block List, be included in the response. - :ivar ~azure.storage.blob.models.Include Include.DELETED: - Specifies that deleted blobs should be returned in the response. - ''' - - def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False, - copy=False, deleted=False, _str=None): - ''' - :param bool snapshots: - Specifies that snapshots should be included in the enumeration. - :param bool metadata: - Specifies that metadata be returned in the response. - :param bool uncommitted_blobs: - Specifies that blobs for which blocks have been uploaded, but which have - not been committed using Put Block List, be included in the response. - :param bool copy: - Specifies that metadata related to any current or previous Copy Blob - operation should be included in the response. - :param bool deleted: - Specifies that deleted blobs should be returned in the response. - :param str _str: - A string representing the includes. - ''' - if not _str: - _str = '' - components = _str.split(',') - self.snapshots = snapshots or ('snapshots' in components) - self.metadata = metadata or ('metadata' in components) - self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components) - self.copy = copy or ('copy' in components) - self.deleted = deleted or ('deleted' in components) - - def __or__(self, other): - return Include(_str=str(self) + str(other)) - - def __add__(self, other): - return Include(_str=str(self) + str(other)) - - def __str__(self): - include = (('snapshots,' if self.snapshots else '') + - ('metadata,' if self.metadata else '') + - ('uncommittedblobs,' if self.uncommitted_blobs else '') + - ('copy,' if self.copy else '') + - ('deleted,' if self.deleted else '')) - return include.rstrip(',') - - -Include.COPY = Include(copy=True) -Include.METADATA = Include(metadata=True) -Include.SNAPSHOTS = Include(snapshots=True) -Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True) -Include.DELETED = Include(deleted=True) - - -class BlobPermissions(object): - ''' - BlobPermissions class to be used with - :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API. - - :ivar BlobPermissions BlobPermissions.ADD: - Add a block to an append blob. - :ivar BlobPermissions BlobPermissions.CREATE: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :ivar BlobPermissions BlobPermissions.DELETE: - Delete the blob. - :ivar BlobPermissions BlobPermissions.READ: - Read the content, properties, metadata and block list. Use the blob as the source of a copy operation. - :ivar BlobPermissions BlobPermissions.WRITE: - Create or write content, properties, metadata, or block list. Snapshot or lease - the blob. Resize the blob (page blob only). Use the blob as the destination of a - copy operation within the same account. 
- ''' - - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, _str=None): - ''' - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param str _str: - A string representing the permissions. - ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.add = add or ('a' in _str) - self.create = create or ('c' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - - def __or__(self, other): - return BlobPermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return BlobPermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - -BlobPermissions.ADD = BlobPermissions(add=True) -BlobPermissions.CREATE = BlobPermissions(create=True) -BlobPermissions.DELETE = BlobPermissions(delete=True) -BlobPermissions.READ = BlobPermissions(read=True) -BlobPermissions.WRITE = BlobPermissions(write=True) - - -class ContainerPermissions(object): - ''' - ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature` - API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`. - - :ivar ContainerPermissions ContainerPermissions.DELETE: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :ivar ContainerPermissions ContainerPermissions.LIST: - List blobs in the container. - :ivar ContainerPermissions ContainerPermissions.READ: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :ivar ContainerPermissions ContainerPermissions.WRITE: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - ''' - - def __init__(self, read=False, write=False, delete=False, list=False, - _str=None): - ''' - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. 
- :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool list: - List blobs in the container. - :param str _str: - A string representing the permissions. - ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - - def __or__(self, other): - return ContainerPermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return ContainerPermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - -ContainerPermissions.DELETE = ContainerPermissions(delete=True) -ContainerPermissions.LIST = ContainerPermissions(list=True) -ContainerPermissions.READ = ContainerPermissions(read=True) -ContainerPermissions.WRITE = ContainerPermissions(write=True) - - -class PremiumPageBlobTier(object): - ''' - Specifies the page blob tier to set the blob to. This is only applicable to page - blobs on premium storage accounts. - Please take a look at https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughput per PageBlobTier. - ''' - - P4 = 'P4' - ''' P4 Tier ''' - - P6 = 'P6' - ''' P6 Tier ''' - - P10 = 'P10' - ''' P10 Tier ''' - - P20 = 'P20' - ''' P20 Tier ''' - - P30 = 'P30' - ''' P30 Tier ''' - - P40 = 'P40' - ''' P40 Tier ''' - - P50 = 'P50' - ''' P50 Tier ''' - - P60 = 'P60' - ''' P60 Tier ''' - - -class StandardBlobTier(object): - ''' - Specifies the blob tier to set the blob to. This is only applicable for block blobs on standard storage accounts. - ''' - - Archive = 'Archive' - ''' Archive ''' - - Cool = 'Cool' - ''' Cool ''' - - Hot = 'Hot' - ''' Hot ''' - - -class AccountInformation(object): - """ - Holds information related to the storage account. - - :ivar str sku_name: - Name of the storage SKU, also known as account type. - Example: Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS, Premium_ZRS - :ivar str account_kind: - Describes the flavour of the storage account, also known as account kind. - Example: Storage, StorageV2, BlobStorage - """ - def __init__(self): - self.sku_name = None - self.account_kind = None diff --git a/azure/multiapi/storage/v2018_03_28/blob/pageblobservice.py b/azure/multiapi/storage/v2018_03_28/blob/pageblobservice.py deleted file mode 100644 index cfeea64..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/pageblobservice.py +++ /dev/null @@ -1,1392 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information.
-# -------------------------------------------------------------------------- -import sys -from os import path - -from ..common._common_conversion import ( - _int_to_str, - _to_str, - _datetime_to_utc_string, - _get_content_md5, -) -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._error import ( - _validate_not_none, - _validate_type_bytes, - _validate_encryption_required, - _validate_encryption_unsupported, - _ERROR_VALUE_NEGATIVE, -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_data_bytes_only, - _add_metadata_headers, -) -from ._deserialization import ( - _convert_xml_to_page_ranges, - _parse_page_properties, - _parse_base_properties, -) -from ._encryption import _generate_blob_encryption_data -from ._error import ( - _ERROR_PAGE_BLOB_SIZE_ALIGNMENT, -) -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from ._upload_chunking import ( - _PageBlobChunkUploader, - _upload_blob_chunks, -) -from .baseblobservice import BaseBlobService -from .models import ( - _BlobTypes, - ResourceProperties) - -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - -# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT -_PAGE_ALIGNMENT = 512 - - -class PageBlobService(BaseBlobService): - ''' - Page blobs are a collection of 512-byte pages optimized for random read and - write operations. To create a page blob, you initialize the page blob and - specify the maximum size the page blob will grow. To add or update the - contents of a page blob, you write a page or pages by specifying an offset - and a range that align to 512-byte page boundaries. A write to a page blob - can overwrite just one page, some pages, or up to 4 MB of the page blob. - Writes to page blobs happen in-place and are immediately committed to the - blob. The maximum size for a page blob is 8 TB. - - :ivar int MAX_PAGE_SIZE: - The size of the pages put by create_blob_from_* methods. Smaller pages - may be put if there is less data provided. The maximum page size the service - supports is 4MB. When using the create_blob_from_* methods, empty pages are skipped. - ''' - - MAX_PAGE_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, - protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, - request_session=None, connection_string=None, socket_timeout=None, token_credential=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :param str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. 
- :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param str custom_domain: - The custom domain to use. This can be set in the Azure Portal. For - example, 'www.mydomain.com'. - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - :param token_credential: - A token credential used to authenticate HTTPS requests. The token value - should be updated before its expiration. - :type `~..common.TokenCredential` - ''' - self.blob_type = _BlobTypes.PageBlob - super(PageBlobService, self).__init__( - account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, - custom_domain, request_session, connection_string, socket_timeout, token_credential) - - def create_blob( - self, container_name, blob_name, content_length, content_settings=None, - sequence_number=None, metadata=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): - ''' - Creates a new Page Blob. - - See create_blob_from_* for high level functions that handle the - creation and upload of large blobs with automatic chunking and - progress notifications. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param int content_length: - Required. This header specifies the maximum size - for the page blob, up to 1 TB. The page blob size must be aligned - to a 512-byte boundary. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set properties on the blob. - :param int sequence_number: - The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. 
- :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :param PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :return: ETag and last modified properties for the new Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._create_blob( - container_name, - blob_name, - content_length, - content_settings=content_settings, - sequence_number=sequence_number, - metadata=metadata, - lease_id=lease_id, - premium_page_blob_tier=premium_page_blob_tier, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def incremental_copy_blob(self, container_name, blob_name, copy_source, - metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None, - destination_if_match=None, destination_if_none_match=None, destination_lease_id=None, - source_lease_id=None, timeout=None): - ''' - Copies an incremental copy of a blob asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The Blob service copies blobs on a best-effort basis. - - The source blob for an incremental copy operation must be a page blob. - Call get_blob_properties on the destination blob to check the status of the copy operation. - The final blob will be committed when the copy completes. - - :param str container_name: - Name of the destination container. The container must exist. - :param str blob_name: - Name of the destination blob. If the destination blob exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure page blob. - The value should be URL-encoded as it would appear in a request URI. - The copy source must be a snapshot and include a valid SAS token or be public. - Example: - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=&sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str). - :param datetime destination_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). 
- :param datetime destination_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the destination blob - has not been modified since the specified date/time. If the destination blob - has been modified, the Blob service returns status code 412 (Precondition Failed). - :param ETag destination_if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - matches the ETag value for an existing destination blob. If the ETag for - the destination blob does not match the ETag specified for If-Match, the - Blob service returns status code 412 (Precondition Failed). - :param ETag destination_if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - does not match the ETag value for the destination blob. Specify the wildcard - character (*) to perform the operation only if the destination blob does not - exist. If the specified condition isn't met, the Blob service returns status - code 412 (Precondition Failed). - :param str destination_lease_id: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :param str source_lease_id: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.blob.models.CopyProperties` - ''' - return self._copy_blob(container_name, blob_name, copy_source, - metadata, - source_if_modified_since=None, source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=destination_if_modified_since, - destination_if_unmodified_since=destination_if_unmodified_since, - destination_if_match=destination_if_match, - destination_if_none_match=destination_if_none_match, - destination_lease_id=destination_lease_id, - source_lease_id=source_lease_id, timeout=timeout, - incremental_copy=True) - - def update_page( - self, container_name, blob_name, page, start_range, end_range, - validate_content=False, lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Updates a range of pages. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param bytes page: - Content of the page. - :param int start_range: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param int end_range: - End of byte range to use for writing to a section of the blob.
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - blob. - :param str lease_id: - Required if the blob has an active lease. - :param int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :param int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :param int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value matches the - value specified. If the values do not match, the Blob service fails. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value does not - match the value specified. If the values are identical, the Blob - service fails. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - - _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) - - return self._update_page( - container_name, - blob_name, - page, - start_range, - end_range, - validate_content=validate_content, - lease_id=lease_id, - if_sequence_number_lte=if_sequence_number_lte, - if_sequence_number_lt=if_sequence_number_lt, - if_sequence_number_eq=if_sequence_number_eq, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout - ) - - def clear_page( - self, container_name, blob_name, start_range, end_range, - lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - Clears a range of pages. - - :param str container_name: - Name of existing container. 
- :param str blob_name: - Name of existing blob. - :param int start_range: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param int end_range: - End of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. - :param str lease_id: - Required if the blob has an active lease. - :param int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :param int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :param int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value matches the - value specified. If the values do not match, the Blob service fails. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for this conditional - header to write the page only if the blob's ETag value does not - match the value specified. If the values are identical, the Blob - service fails. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'page', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-page-write': 'clear', - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), - 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), - 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - align_to_page=True) - - return self._perform_request(request, _parse_page_properties) - - def get_page_ranges( - self, container_name, blob_name, snapshot=None, start_range=None, - end_range=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to retrieve information - from. - :param int start_range: - Start of byte range to use for getting valid page ranges. - If no end_range is given, all bytes after the start_range will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param int end_range: - End of byte range to use for getting valid page ranges. - If end_range is given, start_range must be provided. - This range will return valid page ranges for from the offset start up to - offset end. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. 
- :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A list of valid Page Ranges for the Page Blob. - :rtype: list(:class:`~azure.storage.blob.models.PageRange`) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'pagelist', - 'snapshot': _to_str(snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - if start_range is not None: - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - align_to_page=True) - - return self._perform_request(request, _convert_xml_to_page_ranges) - - def get_page_ranges_diff( - self, container_name, blob_name, previous_snapshot, snapshot=None, - start_range=None, end_range=None, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): - ''' - The response will include only the pages that are different between either a - recent snapshot or the current blob and a previous snapshot, including pages - that were cleared. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str previous_snapshot: - The snapshot parameter is an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that - specifies a more recent blob snapshot to be compared - against a previous snapshot (previous_snapshot). - :param int start_range: - Start of byte range to use for getting different page ranges. - If no end_range is given, all bytes after the start_range will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param int end_range: - End of byte range to use for getting different page ranges. - If end_range is given, start_range must be provided. - This range will return valid page ranges for from the offset start up to - offset end. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the end offset must be a modulus of - 512-1. Examples of valid byte ranges are 0-511, 512-, etc. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A list of different Page Ranges for the Page Blob. - :rtype: list(:class:`~azure.storage.blob.models.PageRange`) - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('previous_snapshot', previous_snapshot) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'pagelist', - 'snapshot': _to_str(snapshot), - 'prevsnapshot': _to_str(previous_snapshot), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - if start_range is not None: - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - align_to_page=True) - - return self._perform_request(request, _convert_xml_to_page_ranges) - - def set_sequence_number( - self, container_name, blob_name, sequence_number_action, sequence_number=None, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - - ''' - Sets the blob sequence number. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.models.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('sequence_number_action', sequence_number_action) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-sequence-number': _to_str(sequence_number), - 'x-ms-sequence-number-action': _to_str(sequence_number_action), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_page_properties) - - def resize_blob( - self, container_name, blob_name, content_length, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - - ''' - Resizes a page blob to the specified size. If the specified value is less - than the current size of the blob, then all pages above the specified value - are cleared. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of existing blob. - :param int content_length: - Size to resize blob to. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). 
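set_sequence_number above translates directly into the x-ms-sequence-number-action and x-ms-blob-sequence-number headers. A short sketch, assuming the SequenceNumberAction values live in the package's models module and using placeholder names:

from azure.multiapi.storage.v2018_03_28.blob import PageBlobService            # illustrative path
from azure.multiapi.storage.v2018_03_28.blob.models import SequenceNumberAction

service = PageBlobService(account_name='myaccount', account_key='<account-key>')

# Pin the sequence number to an explicit value, then bump it by one later.
props = service.set_sequence_number('mycontainer', 'mypageblob',
                                    SequenceNumberAction.Update, sequence_number=7)
props = service.set_sequence_number('mycontainer', 'mypageblob',
                                    SequenceNumberAction.Increment)
print(props.etag, props.last_modified)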
Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: ETag and last modified properties for the updated Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-blob-content-length': _to_str(content_length), - 'x-ms-lease-id': _to_str(lease_id), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match), - } - - return self._perform_request(request, _parse_page_properties) - - # ----Convenience APIs----------------------------------------------------- - - def create_blob_from_path( - self, container_name, blob_name, file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, max_connections=2, - lease_id=None, if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): - ''' - Creates a new blob from a file path, or updates the content of an - existing blob, with automatic chunking and progress notifications. - Empty chunks are skipped, while non-emtpy ones(even if only partly filled) are uploaded. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param str file_path: - Path of the file to upload as the blob content. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
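resize_blob above only sets the x-ms-blob-content-length header; shrinking a blob discards any pages beyond the new length, and page blob sizes are always multiples of 512 bytes. A usage sketch with placeholder names and an illustrative import path:

from azure.multiapi.storage.v2018_03_28.blob import PageBlobService  # illustrative path

service = PageBlobService(account_name='myaccount', account_key='<account-key>')

new_size = 4 * 1024 * 1024  # 4 MiB, a multiple of the 512-byte page size
props = service.resize_blob('mycontainer', 'mypageblob', new_size)
print(props.etag, props.last_modified)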
- :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :return: ETag and last modified properties for the Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('file_path', file_path) - - count = path.getsize(file_path) - with open(file_path, 'rb') as stream: - return self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - progress_callback=progress_callback, - max_connections=max_connections, - lease_id=lease_id, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - premium_page_blob_tier=premium_page_blob_tier) - - def create_blob_from_stream( - self, container_name, blob_name, stream, count, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, lease_id=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, - premium_page_blob_tier=None): - ''' - Creates a new blob from a file/stream, or updates the content of an - existing blob, with automatic chunking and progress notifications. - Empty chunks are skipped, while non-emtpy ones(even if only partly filled) are uploaded. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param io.IOBase stream: - Opened file/stream to upload as the blob content. - :param int count: - Number of bytes to read from the stream. This is required, a page - blob cannot be created if the count is unknown. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set the blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. 
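As the body above shows, create_blob_from_path just measures the file with os.path.getsize and hands the open stream to create_blob_from_stream, so callers only supply the path plus optional settings. A sketch, assuming a local VHD whose size is already page aligned and placeholder names:

from azure.multiapi.storage.v2018_03_28.blob import PageBlobService     # illustrative path
from azure.multiapi.storage.v2018_03_28.blob.models import ContentSettings

service = PageBlobService(account_name='myaccount', account_key='<account-key>')

def show_progress(current, total):
    # Matches the documented callback signature: bytes transferred so far and total size.
    print('{0}/{1} bytes uploaded'.format(current, total))

service.create_blob_from_path(
    'vhds', 'disk.vhd', '/tmp/disk.vhd',  # placeholder container, blob and local path
    content_settings=ContentSettings(content_type='application/octet-stream'),
    progress_callback=show_progress,
    max_connections=4)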
This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. Note that parallel upload - requires the stream to be seekable. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. 
- :return: ETag and last modified properties for the Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('stream', stream) - _validate_not_none('count', count) - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - if count < 0: - raise ValueError(_ERROR_VALUE_NEGATIVE.format('count')) - - if count % _PAGE_ALIGNMENT != 0: - raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count)) - - cek, iv, encryption_data = None, None, None - if self.key_encryption_key is not None: - cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) - - response = self._create_blob( - container_name=container_name, - blob_name=blob_name, - content_length=count, - content_settings=content_settings, - metadata=metadata, - lease_id=lease_id, - premium_page_blob_tier=premium_page_blob_tier, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - encryption_data=encryption_data - ) - - if count == 0: - return response - - # _upload_blob_chunks returns the block ids for block blobs so resource_properties - # is passed as a parameter to get the last_modified and etag for page and append blobs. - # this info is not needed for block_blobs since _put_block_list is called after which gets this info - resource_properties = ResourceProperties() - _upload_blob_chunks( - blob_service=self, - container_name=container_name, - blob_name=blob_name, - blob_size=count, - block_size=self.MAX_PAGE_SIZE, - stream=stream, - max_connections=max_connections, - progress_callback=progress_callback, - validate_content=validate_content, - lease_id=lease_id, - uploader_class=_PageBlobChunkUploader, - if_match=response.etag, - timeout=timeout, - content_encryption_key=cek, - initialization_vector=iv, - resource_properties=resource_properties - ) - - return resource_properties - - def create_blob_from_bytes( - self, container_name, blob_name, blob, index=0, count=None, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, lease_id=None, - if_modified_since=None, if_unmodified_since=None, if_match=None, - if_none_match=None, timeout=None, premium_page_blob_tier=None): - ''' - Creates a new blob from an array of bytes, or updates the content - of an existing blob, with automatic chunking and progress - notifications. Empty chunks are skipped, while non-emtpy ones(even if only partly filled) are uploaded. - - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to create or update. - :param bytes blob: - Content of blob as an array of bytes. - :param int index: - Start index in the byte array. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param ~azure.storage.blob.models.ContentSettings content_settings: - ContentSettings object used to set blob properties. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each page of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. 
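create_blob_from_stream above rejects a count that is not a multiple of the 512-byte page size before creating the blob and chunk-uploading the non-empty pages. When a payload is not naturally aligned, a caller can pad it first; the helper below is a hypothetical illustration of that pattern, not part of the library:

from io import BytesIO

PAGE_SIZE = 512  # page blobs are written in 512-byte pages

def pad_to_page_boundary(data):
    # Hypothetical helper: append zero bytes until len(data) is a multiple of 512,
    # so the alignment check in create_blob_from_stream passes.
    remainder = len(data) % PAGE_SIZE
    if remainder:
        data += b'\x00' * (PAGE_SIZE - remainder)
    return data

payload = pad_to_page_boundary(b'example page blob payload')
stream = BytesIO(payload)
# service.create_blob_from_stream('mycontainer', 'mypageblob', stream, count=len(payload))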
This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far, and total is the - size of the blob, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param str lease_id: - Required if the blob has an active lease. - :param datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :param str if_match: - An ETag value, or the wildcard character (*). Specify this header to perform - the operation only if the resource's ETag matches the value specified. - :param str if_none_match: - An ETag value, or the wildcard character (*). Specify this header - to perform the operation only if the resource's ETag does not match - the value specified. Specify the wildcard character (*) to perform - the operation only if the resource does not exist, and fail the - operation if it does exist. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :return: ETag and last modified properties for the Page Blob - :rtype: :class:`~azure.storage.blob.models.ResourceProperties` - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('blob', blob) - _validate_type_bytes('blob', blob) - - if index < 0: - raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(blob) - index - - stream = BytesIO(blob) - stream.seek(index) - - return self.create_blob_from_stream( - container_name=container_name, - blob_name=blob_name, - stream=stream, - count=count, - content_settings=content_settings, - metadata=metadata, - validate_content=validate_content, - lease_id=lease_id, - progress_callback=progress_callback, - max_connections=max_connections, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_match=if_match, - if_none_match=if_none_match, - timeout=timeout, - premium_page_blob_tier=premium_page_blob_tier) - - def set_premium_page_blob_tier( - self, container_name, blob_name, premium_page_blob_tier, - timeout=None): - ''' - Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. 
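create_blob_from_bytes above wraps the buffer in a BytesIO, seeks to index and defers to create_blob_from_stream, so a sub-slice of a larger buffer can be uploaded without copying. A sketch with placeholder names; count (the resulting blob size) still has to be a multiple of 512, while index is just an offset into the buffer:

from azure.multiapi.storage.v2018_03_28.blob import PageBlobService  # illustrative path

service = PageBlobService(account_name='myaccount', account_key='<account-key>')

buffer = bytes(bytearray(4096))  # four zeroed 512-byte pages
# Upload only the middle 2048 bytes of the buffer as a new 2 KiB page blob.
service.create_blob_from_bytes('mycontainer', 'mypageblob', buffer,
                               index=1024, count=2048)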
- - :param str container_name: - Name of existing container. - :param str blob_name: - Name of blob to update. - :param PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('premium_page_blob_tier', premium_page_blob_tier) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'tier', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-access-tier': _to_str(premium_page_blob_tier) - } - - self._perform_request(request) - - def copy_blob(self, container_name, blob_name, copy_source, - metadata=None, - source_if_modified_since=None, - source_if_unmodified_since=None, - source_if_match=None, source_if_none_match=None, - destination_if_modified_since=None, - destination_if_unmodified_since=None, - destination_if_match=None, - destination_if_none_match=None, - destination_lease_id=None, - source_lease_id=None, timeout=None, - premium_page_blob_tier=None): - ''' - Copies a blob asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation must be a page blob. If the destination - blob already exists, it must be of the same blob type as the source blob. - Any existing destination blob will be overwritten. - The destination blob cannot be modified while a copy operation is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - If the tier on the source blob is larger than the tier being passed to this - copy operation or if the size of the blob exceeds the tier being passed to - this copy operation then the operation will fail. - - You can call get_blob_properties on the destination - blob to check the status of the copy operation. The final blob will be - committed when the copy completes. - - :param str container_name: - Name of the destination container. The container must exist. - :param str blob_name: - Name of the destination blob. If the destination blob exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. 
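set_premium_page_blob_tier above issues a comp=tier PUT carrying the x-ms-access-tier header. A sketch using the PremiumPageBlobTier values assumed to come from the package's models module, with a placeholder premium account:

from azure.multiapi.storage.v2018_03_28.blob import PageBlobService          # illustrative path
from azure.multiapi.storage.v2018_03_28.blob.models import PremiumPageBlobTier

service = PageBlobService(account_name='mypremiumaccount', account_key='<account-key>')

# Move the blob to the P20 tier; this only succeeds on premium storage accounts.
service.set_premium_page_blob_tier('vhds', 'disk.vhd', PremiumPageBlobTier.P20)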
If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str). - :param datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :param datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :param ETag source_if_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the source blob only if its ETag matches the value - specified. If the ETag values do not match, the Blob service returns - status code 412 (Precondition Failed). This header cannot be specified - if the source is an Azure File. - :param ETag source_if_none_match: - An ETag value, or the wildcard character (*). Specify this conditional - header to copy the blob only if its ETag does not match the value - specified. If the values are identical, the Blob service returns status - code 412 (Precondition Failed). This header cannot be specified if the - source is an Azure File. - :param datetime destination_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :param datetime destination_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :param ETag destination_if_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - matches the ETag value for an existing destination blob. If the ETag for - the destination blob does not match the ETag specified for If-Match, the - Blob service returns status code 412 (Precondition Failed). - :param ETag destination_if_none_match: - An ETag value, or the wildcard character (*). Specify an ETag value for - this conditional header to copy the blob only if the specified ETag value - does not match the ETag value for the destination blob. 
Specify the wildcard - character (*) to perform the operation only if the destination blob does not - exist. If the specified condition isn't met, the Blob service returns status - code 412 (Precondition Failed). - :param str destination_lease_id: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :param str source_lease_id: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :param int timeout: - The timeout parameter is expressed in seconds. - :param PageBlobTier premium_page_blob_tier: - A page blob tier value to set on the destination blob. The tier correlates to - the size of the blob and number of allowed IOPS. This is only applicable to - page blobs on premium storage accounts. - If the tier on the source blob is larger than the tier being passed to this - copy operation or if the size of the blob exceeds the tier being passed to - this copy operation then the operation will fail. - :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.blob.models.CopyProperties` - ''' - return self._copy_blob(container_name, blob_name, copy_source, - metadata, premium_page_blob_tier, - source_if_modified_since, source_if_unmodified_since, - source_if_match, source_if_none_match, - destination_if_modified_since, - destination_if_unmodified_since, - destination_if_match, - destination_if_none_match, - destination_lease_id, - source_lease_id, timeout, - False) - - # -----Helper methods----------------------------------------------------- - - def _create_blob( - self, container_name, blob_name, content_length, content_settings=None, - sequence_number=None, metadata=None, lease_id=None, premium_page_blob_tier=None, if_modified_since=None, - if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, - encryption_data=None): - ''' - See create_blob for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - :param str encryption_data: - The JSON formatted encryption metadata to upload as a part of the blob. - This should only be passed internally from other methods and only applied - when uploading entire blob contents immediately follows creation of the blob. 
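copy_blob above starts an asynchronous, best-effort server-side copy and returns CopyProperties immediately; as its docstring notes, completion is tracked by polling get_blob_properties on the destination. A sketch with placeholder URLs and names:

import time

from azure.multiapi.storage.v2018_03_28.blob import PageBlobService  # illustrative path

service = PageBlobService(account_name='myaccount', account_key='<account-key>')

source_url = 'https://otheraccount.blob.core.windows.net/vhds/source.vhd?<sas-token>'
copy = service.copy_blob('vhds', 'copied.vhd', source_url)
print(copy.id, copy.status)

# Poll the destination blob until the service reports the copy as finished.
while service.get_blob_properties('vhds', 'copied.vhd').properties.copy.status == 'pending':
    time.sleep(5)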
- ''' - - _validate_not_none('container_name', container_name) - _validate_not_none('blob_name', blob_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-blob-type': _to_str(self.blob_type), - 'x-ms-blob-content-length': _to_str(content_length), - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-blob-sequence-number': _to_str(sequence_number), - 'x-ms-access-tier': _to_str(premium_page_blob_tier), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - if encryption_data is not None: - request.headers['x-ms-meta-encryptiondata'] = encryption_data - - return self._perform_request(request, _parse_base_properties) - - def _update_page( - self, container_name, blob_name, page, start_range, end_range, - validate_content=False, lease_id=None, if_sequence_number_lte=None, - if_sequence_number_lt=None, if_sequence_number_eq=None, - if_modified_since=None, if_unmodified_since=None, - if_match=None, if_none_match=None, timeout=None): - ''' - See update_page for more details. This helper method - allows for encryption or other such special behavior because - it is safely handled by the library. These behaviors are - prohibited in the public version of this function. - ''' - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(container_name, blob_name) - request.query = { - 'comp': 'page', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-page-write': 'update', - 'x-ms-lease-id': _to_str(lease_id), - 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), - 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), - 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), - 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), - 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), - 'If-Match': _to_str(if_match), - 'If-None-Match': _to_str(if_none_match) - } - _validate_and_format_range_headers( - request, - start_range, - end_range, - align_to_page=True) - request.body = _get_data_bytes_only('page', page) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - return self._perform_request(request, _parse_page_properties) diff --git a/azure/multiapi/storage/v2018_03_28/blob/sharedaccesssignature.py b/azure/multiapi/storage/v2018_03_28/blob/sharedaccesssignature.py deleted file mode 100644 index 472ce78..0000000 --- a/azure/multiapi/storage/v2018_03_28/blob/sharedaccesssignature.py +++ /dev/null @@ -1,179 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
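When validate_content is set, _update_page above attaches a Content-MD5 header produced by _get_content_md5, i.e. the base64-encoded MD5 digest of the page body, so the service can verify the bytes that arrived. A standard-library sketch of that transactional hash:

import base64
import hashlib

def content_md5(body):
    # Same shape as the _get_content_md5 helper for in-memory bytes:
    # raw MD5 digest of the payload, base64-encoded for the Content-MD5 header.
    return base64.b64encode(hashlib.md5(body).digest()).decode('utf-8')

page = b'\x00' * 512
print(content_md5(page))  # value sent alongside 'x-ms-page-write': 'update'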
-# -------------------------------------------------------------------------- - -from ..common.sharedaccesssignature import ( - SharedAccessSignature, - _SharedAccessHelper, -) -from ._constants import X_MS_VERSION - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_blob(self, container_name, blob_name, permission=None, - expiry=None, start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the blob. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param BlobPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. 
- :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = container_name + '/' + blob_name - - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource('b') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. 
- :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name) - - return sas.get_token() diff --git a/azure/multiapi/storage/v2018_03_28/common/__init__.py b/azure/multiapi/storage/v2018_03_28/common/__init__.py deleted file mode 100644 index 797c970..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ._constants import ( - __author__, - __version__, - DEFAULT_X_MS_VERSION, -) -from .cloudstorageaccount import CloudStorageAccount -from .models import ( - RetentionPolicy, - Logging, - Metrics, - CorsRule, - DeleteRetentionPolicy, - StaticWebsite, - ServiceProperties, - AccessPolicy, - ResourceTypes, - Services, - AccountPermissions, - Protocol, - ServiceStats, - GeoReplication, - LocationMode, - RetryContext, -) -from .retry import ( - ExponentialRetry, - LinearRetry, - no_retry, -) -from .sharedaccesssignature import ( - SharedAccessSignature, -) -from .tokencredential import TokenCredential diff --git a/azure/multiapi/storage/v2018_03_28/common/_auth.py b/azure/multiapi/storage/v2018_03_28/common/_auth.py deleted file mode 100644 index 15c15b9..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_auth.py +++ /dev/null @@ -1,117 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
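generate_blob and generate_container above both assemble the token through _SharedAccessHelper and return it as a query string; callers append it to the resource URL or pass it as sas_token to a service client. A sketch with placeholder credentials; the BlobPermissions import location is an assumption:

from datetime import datetime, timedelta

from azure.multiapi.storage.v2018_03_28.blob.sharedaccesssignature import BlobSharedAccessSignature
from azure.multiapi.storage.v2018_03_28.blob.models import BlobPermissions  # assumed location

sas = BlobSharedAccessSignature('myaccount', '<account-key>')
token = sas.generate_blob(
    'mycontainer', 'myblob',
    permission=BlobPermissions.READ,
    expiry=datetime.utcnow() + timedelta(hours=1))

print('https://myaccount.blob.core.windows.net/mycontainer/myblob?' + token)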
-# -------------------------------------------------------------------------- -from ._common_conversion import ( - _sign_string, -) -from ._constants import ( - DEV_ACCOUNT_NAME, - DEV_ACCOUNT_SECONDARY_NAME -) - -import logging -logger = logging.getLogger(__name__) - - -class _StorageSharedKeyAuthentication(object): - def __init__(self, account_name, account_key, is_emulated=False): - self.account_name = account_name - self.account_key = account_key - self.is_emulated = is_emulated - - def _get_headers(self, request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - def _get_verb(self, request): - return request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = request.path.split('?')[0] - - # for emulator, use the DEV_ACCOUNT_NAME instead of DEV_ACCOUNT_SECONDARY_NAME - # as this is how the emulator works - if self.is_emulated and uri_path.find(DEV_ACCOUNT_SECONDARY_NAME) == 1: - # only replace the first instance - uri_path = uri_path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1) - - return '/' + self.account_name + uri_path - - def _get_canonicalized_headers(self, request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - signature = _sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.headers['Authorization'] = auth_string - - -class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication): - def sign_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - logger.debug("String_to_sign=%s", string_to_sign) - - def _get_canonicalized_resource_query(self, request): - sorted_queries = [(name, value) for name, value in request.query.items()] - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + value - - return string_to_sign - - -class _StorageNoAuthentication(object): - def sign_request(self, request): - pass - - -class _StorageSASAuthentication(object): - def __init__(self, sas_token): - # ignore ?-prefix (added by tools such as Azure Portal) on sas tokens - # doing so avoids double question marks when signing - if sas_token[0] == '?': - self.sas_token = sas_token[1:] - else: - self.sas_token = sas_token - - def sign_request(self, request): - # if 'sig=' is present, then the request has already been signed - # as is the case when performing retries - if 'sig=' in request.path: - return - if '?' 
in request.path: - request.path += '&' - else: - request.path += '?' - - request.path += self.sas_token diff --git a/azure/multiapi/storage/v2018_03_28/common/_common_conversion.py b/azure/multiapi/storage/v2018_03_28/common/_common_conversion.py deleted file mode 100644 index 8b50afb..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_common_conversion.py +++ /dev/null @@ -1,126 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac -import sys -from io import (SEEK_SET) - -from dateutil.tz import tzutc - -from ._error import ( - _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, - _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM, -) -from .models import ( - _unicode_type, -) - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_str(value): - return _str(value) if value is not None else None - - -def _int_to_str(value): - return str(int(value)) if value is not None else None - - -def _bool_to_str(value): - if value is None: - return None - - if isinstance(value, bool): - if value: - return 'true' - else: - return 'false' - - return str(value) - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') - - -def _datetime_to_utc_string(value): - # Azure expects the date value passed in to be UTC. - # Azure will always return values as UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - if value is None: - return None - - if value.tzinfo: - value = value.astimezone(tzutc()) - - return value.strftime('%a, %d %b %Y %H:%M:%S GMT') - - -def _encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def _decode_base64_to_bytes(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def _decode_base64_to_text(data): - decoded_bytes = _decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def _sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = _decode_base64_to_bytes(key) - else: - if isinstance(key, _unicode_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, _unicode_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = _encode_base64(digest) - return encoded_digest - - -def _get_content_md5(data): - md5 = hashlib.md5() - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data')) - else: - raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data')) - - return base64.b64encode(md5.digest()).decode('utf-8') - - -def _lower(text): - return text.lower() diff --git a/azure/multiapi/storage/v2018_03_28/common/_connection.py b/azure/multiapi/storage/v2018_03_28/common/_connection.py deleted file mode 
100644 index 6160148..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_connection.py +++ /dev/null @@ -1,160 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys - -if sys.version_info >= (3,): - from urllib.parse import urlparse -else: - from urlparse import urlparse - -from ._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, - DEV_ACCOUNT_NAME, - DEV_ACCOUNT_SECONDARY_NAME, - DEV_ACCOUNT_KEY, - DEV_BLOB_HOST, - DEV_QUEUE_HOST, -) -from ._error import ( - _ERROR_STORAGE_MISSING_INFO, -) - -_EMULATOR_ENDPOINTS = { - 'blob': DEV_BLOB_HOST, - 'queue': DEV_QUEUE_HOST, - 'file': '', -} - -_CONNECTION_ENDPOINTS = { - 'blob': 'BlobEndpoint', - 'queue': 'QueueEndpoint', - 'file': 'FileEndpoint', -} - -_CONNECTION_ENDPOINTS_SECONDARY = { - 'blob': 'BlobSecondaryEndpoint', - 'queue': 'QueueSecondaryEndpoint', - 'file': 'FileSecondaryEndpoint', -} - - -class _ServiceParameters(object): - def __init__(self, service, account_name=None, account_key=None, sas_token=None, token_credential=None, - is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - custom_domain=None, custom_domain_secondary=None): - - self.account_name = account_name - self.account_key = account_key - self.sas_token = sas_token - self.token_credential = token_credential - self.protocol = protocol or DEFAULT_PROTOCOL - self.is_emulated = is_emulated - - if is_emulated: - self.account_name = DEV_ACCOUNT_NAME - self.protocol = 'http' - - # Only set the account key if a sas_token is not present to allow sas to be used with the emulator - self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None - - self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_NAME) - self.secondary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_SECONDARY_NAME) - else: - # Strip whitespace from the key - if self.account_key: - self.account_key = self.account_key.strip() - - endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE - - # Setup the primary endpoint - if custom_domain: - parsed_url = urlparse(custom_domain) - - # Trim any trailing slashes from the path - path = parsed_url.path.rstrip('/') - - self.primary_endpoint = parsed_url.netloc + path - self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme - else: - if not self.account_name: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix) - - # Setup the secondary endpoint - if custom_domain_secondary: - if not custom_domain: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - parsed_url = urlparse(custom_domain_secondary) - - # Trim any trailing slashes from the path - path = parsed_url.path.rstrip('/') - - self.secondary_endpoint = parsed_url.netloc + path - else: - if self.account_name: - self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix) - else: - self.secondary_endpoint = None - - @staticmethod - def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, token_credential= None, - is_emulated=None, protocol=None, endpoint_suffix=None, custom_domain=None, - request_session=None, connection_string=None, socket_timeout=None): - if 
connection_string: - params = _ServiceParameters._from_connection_string(connection_string, service) - elif is_emulated: - params = _ServiceParameters(service, is_emulated=True) - elif account_name: - if protocol.lower() != 'https' and token_credential is not None: - raise ValueError("Token credential is only supported with HTTPS.") - params = _ServiceParameters(service, - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - token_credential=token_credential, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=custom_domain) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - params.request_session = request_session - params.socket_timeout = socket_timeout - return params - - @staticmethod - def _from_connection_string(connection_string, service): - # Split into key=value pairs removing empties, then split the pairs into a dict - config = dict(s.split('=', 1) for s in connection_string.split(';') if s) - - # Authentication - account_name = config.get('AccountName') - account_key = config.get('AccountKey') - sas_token = config.get('SharedAccessSignature') - - # Emulator - is_emulated = config.get('UseDevelopmentStorage') - - # Basic URL Configuration - protocol = config.get('DefaultEndpointsProtocol') - endpoint_suffix = config.get('EndpointSuffix') - - # Custom URLs - endpoint = config.get(_CONNECTION_ENDPOINTS[service]) - endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service]) - - return _ServiceParameters(service, - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - custom_domain=endpoint, - custom_domain_secondary=endpoint_secondary) diff --git a/azure/multiapi/storage/v2018_03_28/common/_constants.py b/azure/multiapi/storage/v2018_03_28/common/_constants.py deleted file mode 100644 index 22516d6..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_constants.py +++ /dev/null @@ -1,47 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import platform -import sys - -__author__ = 'Microsoft Corp. 
' -__version__ = '1.3.0' - -# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)' -# First version(0.37.0) is the common package, and the second version(0.38.0) is the service package -USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__) -USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(), - platform.python_version(), platform.system(), - platform.release()) - -# default values for common package, in case it is used directly -DEFAULT_X_MS_VERSION = '2018-03-28' -DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX) - -# Live ServiceClient URLs -SERVICE_HOST_BASE = 'core.windows.net' -DEFAULT_PROTOCOL = 'https' - -# Development ServiceClient URLs -DEV_BLOB_HOST = '127.0.0.1:10000' -DEV_QUEUE_HOST = '127.0.0.1:10001' - -# Default credentials for Development Storage Service -DEV_ACCOUNT_NAME = 'devstoreaccount1' -DEV_ACCOUNT_SECONDARY_NAME = 'devstoreaccount1-secondary' -DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==' - -# Socket timeout in seconds -DEFAULT_SOCKET_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - DEFAULT_SOCKET_TIMEOUT = (20, 2000) - -# Encryption constants -_ENCRYPTION_PROTOCOL_V1 = '1.0' diff --git a/azure/multiapi/storage/v2018_03_28/common/_deserialization.py b/azure/multiapi/storage/v2018_03_28/common/_deserialization.py deleted file mode 100644 index 80803da..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_deserialization.py +++ /dev/null @@ -1,384 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
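The shared key classes above concatenate the verb, the standard headers, the canonicalized x-ms- headers and the canonicalized resource into a string-to-sign, then _sign_string HMAC-SHA256s it with the base64-decoded account key and base64-encodes the digest for the Authorization header. A standard-library sketch of that final step, using the published emulator key from the constants above; the string-to-sign here is a toy value, the real one is assembled by sign_request:

import base64
import hashlib
import hmac

def sign(account_key, string_to_sign):
    # Mirrors _sign_string: the account key is base64, the signature is
    # base64(HMAC-SHA256(key, string_to_sign)).
    key = base64.b64decode(account_key)
    digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')

DEV_ACCOUNT_KEY = ('Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsu'
                   'Fq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==')
demo_string_to_sign = 'GET\n' + '\n' * 11 + 'x-ms-version:2018-03-28\n/devstoreaccount1/mycontainer/myblob'
print('SharedKey devstoreaccount1:' + sign(DEV_ACCOUNT_KEY, demo_string_to_sign))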
-# -------------------------------------------------------------------------- -from dateutil import parser - -from ._common_conversion import _to_str - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from .models import ( - ServiceProperties, - Logging, - Metrics, - CorsRule, - AccessPolicy, - _dict, - GeoReplication, - ServiceStats, - DeleteRetentionPolicy, - StaticWebsite, -) - - -def _to_int(value): - return value if value is None else int(value) - - -def _bool(value): - return value.lower() == 'true' - - -def _to_upper_str(value): - return _to_str(value).upper() if value is not None else None - - -def _get_download_size(start_range, end_range, resource_size): - if start_range is not None: - end_range = end_range if end_range else (resource_size if resource_size else None) - if end_range is not None: - return end_range - start_range - else: - return None - else: - return resource_size - - -GET_PROPERTIES_ATTRIBUTE_MAP = { - 'last-modified': (None, 'last_modified', parser.parse), - 'etag': (None, 'etag', _to_str), - 'x-ms-blob-type': (None, 'blob_type', _to_str), - 'content-length': (None, 'content_length', _to_int), - 'content-range': (None, 'content_range', _to_str), - 'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _to_int), - 'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _to_int), - 'x-ms-blob-public-access': (None, 'public_access', _to_str), - 'x-ms-access-tier': (None, 'blob_tier', _to_str), - 'x-ms-access-tier-change-time': (None, 'blob_tier_change_time', parser.parse), - 'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool), - 'x-ms-archive-status': (None, 'rehydration_status', _to_str), - 'x-ms-share-quota': (None, 'quota', _to_int), - 'x-ms-server-encrypted': (None, 'server_encrypted', _bool), - 'x-ms-creation-time': (None, 'creation_time', parser.parse), - 'content-type': ('content_settings', 'content_type', _to_str), - 'cache-control': ('content_settings', 'cache_control', _to_str), - 'content-encoding': ('content_settings', 'content_encoding', _to_str), - 'content-disposition': ('content_settings', 'content_disposition', _to_str), - 'content-language': ('content_settings', 'content_language', _to_str), - 'content-md5': ('content_settings', 'content_md5', _to_str), - 'x-ms-lease-status': ('lease', 'status', _to_str), - 'x-ms-lease-state': ('lease', 'state', _to_str), - 'x-ms-lease-duration': ('lease', 'duration', _to_str), - 'x-ms-copy-id': ('copy', 'id', _to_str), - 'x-ms-copy-source': ('copy', 'source', _to_str), - 'x-ms-copy-status': ('copy', 'status', _to_str), - 'x-ms-copy-progress': ('copy', 'progress', _to_str), - 'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse), - 'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str), - 'x-ms-copy-status-description': ('copy', 'status_description', _to_str), - 'x-ms-has-immutability-policy': (None, 'has_immutability_policy', _bool), - 'x-ms-has-legal-hold': (None, 'has_legal_hold', _bool), -} - - -def _parse_metadata(response): - ''' - Extracts out resource metadata information. - ''' - - if response is None or response.headers is None: - return None - - metadata = _dict() - for key, value in response.headers.items(): - if key.lower().startswith('x-ms-meta-'): - metadata[key[10:]] = _to_str(value) - - return metadata - - -def _parse_properties(response, result_class): - ''' - Extracts out resource properties and metadata information. 
- Ignores the standard http headers. - ''' - - if response is None or response.headers is None: - return None - - props = result_class() - for key, value in response.headers.items(): - info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key) - if info: - if info[0] is None: - setattr(props, info[1], info[2](value)) - else: - attr = getattr(props, info[0]) - setattr(attr, info[1], info[2](value)) - - if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None: - props.blob_tier = _to_upper_str(props.blob_tier) - return props - - -def _parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def _convert_xml_to_signed_identifiers(response): - ''' - - - - unique-value - - start-time - expiry-time - abbreviated-permission-list - - - - ''' - if response is None or response.body is None: - return None - - list_element = ETree.fromstring(response.body) - signed_identifiers = _dict() - - for signed_identifier_element in list_element.findall('SignedIdentifier'): - # Id element - id = signed_identifier_element.find('Id').text - - # Access policy element - access_policy = AccessPolicy() - access_policy_element = signed_identifier_element.find('AccessPolicy') - if access_policy_element is not None: - start_element = access_policy_element.find('Start') - if start_element is not None: - access_policy.start = parser.parse(start_element.text) - - expiry_element = access_policy_element.find('Expiry') - if expiry_element is not None: - access_policy.expiry = parser.parse(expiry_element.text) - - access_policy.permission = access_policy_element.findtext('Permission') - - signed_identifiers[id] = access_policy - - return signed_identifiers - - -def _convert_xml_to_service_stats(response): - ''' - - - - live|bootstrap|unavailable - sync-time| - - - ''' - if response is None or response.body is None: - return None - - service_stats_element = ETree.fromstring(response.body) - - geo_replication_element = service_stats_element.find('GeoReplication') - - geo_replication = GeoReplication() - geo_replication.status = geo_replication_element.find('Status').text - last_sync_time = geo_replication_element.find('LastSyncTime').text - geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None - - service_stats = ServiceStats() - service_stats.geo_replication = geo_replication - return service_stats - - -def _convert_xml_to_service_properties(response): - ''' - - - - version-number - true|false - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - - comma-separated-list-of-allowed-origins - comma-separated-list-of-HTTP-verb - max-caching-age-in-seconds - comma-seperated-list-of-response-headers - comma-seperated-list-of-request-headers - - - - true|false - number-of-days - - - true|false - - - - - ''' - if response is None or response.body is None: - return None - - service_properties_element = ETree.fromstring(response.body) - service_properties = ServiceProperties() - - # Logging - logging = 
service_properties_element.find('Logging') - if logging is not None: - service_properties.logging = Logging() - service_properties.logging.version = logging.find('Version').text - service_properties.logging.delete = _bool(logging.find('Delete').text) - service_properties.logging.read = _bool(logging.find('Read').text) - service_properties.logging.write = _bool(logging.find('Write').text) - - _convert_xml_to_retention_policy(logging.find('RetentionPolicy'), - service_properties.logging.retention_policy) - # HourMetrics - hour_metrics_element = service_properties_element.find('HourMetrics') - if hour_metrics_element is not None: - service_properties.hour_metrics = Metrics() - _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics) - - # MinuteMetrics - minute_metrics_element = service_properties_element.find('MinuteMetrics') - if minute_metrics_element is not None: - service_properties.minute_metrics = Metrics() - _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics) - - # CORS - cors = service_properties_element.find('Cors') - if cors is not None: - service_properties.cors = list() - for rule in cors.findall('CorsRule'): - allowed_origins = rule.find('AllowedOrigins').text.split(',') - - allowed_methods = rule.find('AllowedMethods').text.split(',') - - max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text) - - cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds) - - exposed_headers = rule.find('ExposedHeaders').text - if exposed_headers is not None: - cors_rule.exposed_headers = exposed_headers.split(',') - - allowed_headers = rule.find('AllowedHeaders').text - if allowed_headers is not None: - cors_rule.allowed_headers = allowed_headers.split(',') - - service_properties.cors.append(cors_rule) - - # Target version - target_version = service_properties_element.find('DefaultServiceVersion') - if target_version is not None: - service_properties.target_version = target_version.text - - # DeleteRetentionPolicy - delete_retention_policy_element = service_properties_element.find('DeleteRetentionPolicy') - if delete_retention_policy_element is not None: - service_properties.delete_retention_policy = DeleteRetentionPolicy() - policy_enabled = _bool(delete_retention_policy_element.find('Enabled').text) - service_properties.delete_retention_policy.enabled = policy_enabled - - if policy_enabled: - service_properties.delete_retention_policy.days = int(delete_retention_policy_element.find('Days').text) - - # StaticWebsite - static_website_element = service_properties_element.find('StaticWebsite') - if static_website_element is not None: - service_properties.static_website = StaticWebsite() - service_properties.static_website.enabled = _bool(static_website_element.find('Enabled').text) - - index_document_element = static_website_element.find('IndexDocument') - if index_document_element is not None: - service_properties.static_website.index_document = index_document_element.text - - error_document_element = static_website_element.find('ErrorDocument404Path') - if error_document_element is not None: - service_properties.static_website.error_document_404_path = error_document_element.text - - return service_properties - - -def _convert_xml_to_metrics(xml, metrics): - ''' - version-number - true|false - true|false - - true|false - number-of-days - - ''' - # Version - metrics.version = xml.find('Version').text - - # Enabled - metrics.enabled = _bool(xml.find('Enabled').text) - - # IncludeAPIs - include_apis_element = 
xml.find('IncludeAPIs') - if include_apis_element is not None: - metrics.include_apis = _bool(include_apis_element.text) - - # RetentionPolicy - _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy) - - -def _convert_xml_to_retention_policy(xml, retention_policy): - ''' - true|false - number-of-days - ''' - # Enabled - retention_policy.enabled = _bool(xml.find('Enabled').text) - - # Days - days_element = xml.find('Days') - if days_element is not None: - retention_policy.days = int(days_element.text) diff --git a/azure/multiapi/storage/v2018_03_28/common/_encryption.py b/azure/multiapi/storage/v2018_03_28/common/_encryption.py deleted file mode 100644 index cd7d92e..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_encryption.py +++ /dev/null @@ -1,233 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC - -from ._common_conversion import ( - _encode_base64, - _decode_base64_to_bytes, -) -from ._constants import ( - _ENCRYPTION_PROTOCOL_V1, - __version__, -) -from ._error import ( - _ERROR_UNSUPPORTED_ENCRYPTION_VERSION, - _validate_not_none, - _validate_encryption_protocol_version, - _validate_key_encryption_key_unwrap, - _validate_kek_id, -) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. 
- :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - except KeyError: - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. 
- :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol) - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_unwrap(key_encryption_key) - _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid()) - - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key diff --git a/azure/multiapi/storage/v2018_03_28/common/_error.py b/azure/multiapi/storage/v2018_03_28/common/_error.py deleted file mode 100644 index 90faa01..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_error.py +++ /dev/null @@ -1,183 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from sys import version_info - -if version_info < (3,): - def _str(value): - if isinstance(value, unicode): - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_str(value): - return _str(value) if value is not None else None - - -from azure.common import ( - AzureHttpError, - AzureConflictHttpError, - AzureMissingResourceHttpError, - AzureException, -) -from ._constants import ( - _ENCRYPTION_PROTOCOL_V1, -) - -_ERROR_CONFLICT = 'Conflict ({0})' -_ERROR_NOT_FOUND = 'Not found ({0})' -_ERROR_UNKNOWN = 'Unknown error ({0})' -_ERROR_STORAGE_MISSING_INFO = \ - 'You need to provide an account name and either an account_key or sas_token when creating a storage service.' -_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \ - 'The emulator does not support the file service.' 
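The client-side encryption helpers being removed here never shipped a key-encryption-key implementation: the `_validate_key_encryption_key_*` helpers in this module only check that the caller-supplied object exposes `wrap_key`, `unwrap_key`, `get_kid` and `get_key_wrap_algorithm`, and `_validate_and_unwrap_cek` above gives a `key_resolver` priority over an explicitly passed key. The following is a minimal sketch of a conforming local KEK for readers tracking what the deleted validators expected; the `LocalAESKeyWrapper` name and the AES key-wrap choice are illustrative assumptions, not part of the removed code.

# Illustrative only: a local key-encryption-key object satisfying the
# interface checked by the removed _validate_key_encryption_key_wrap /
# _validate_key_encryption_key_unwrap helpers.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap


class LocalAESKeyWrapper(object):
    def __init__(self, kid, wrapping_key):
        # kid: opaque identifier stored alongside the wrapped CEK metadata
        # wrapping_key: 16/24/32-byte AES key used only to wrap/unwrap the CEK
        self._kid = kid
        self._wrapping_key = wrapping_key

    def wrap_key(self, cek):
        # Called (as kek.wrap_key(cek)) when building the encryption metadata.
        return aes_key_wrap(self._wrapping_key, cek, default_backend())

    def unwrap_key(self, wrapped_cek, algorithm):
        # algorithm is taken from the stored WrappedContentKey metadata.
        return aes_key_unwrap(self._wrapping_key, wrapped_cek, default_backend())

    def get_kid(self):
        return self._kid

    def get_key_wrap_algorithm(self):
        return 'A256KW'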
-_ERROR_ACCESS_POLICY = \ - 'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \ - 'instance' -_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.' -_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.' -_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.' -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' -_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.' -_ERROR_VALUE_NONE = '{0} should not be None.' -_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.' -_ERROR_VALUE_NEGATIVE = '{0} should not be negative.' -_ERROR_START_END_NEEDED_FOR_MD5 = \ - 'Both end_range and start_range need to be specified ' + \ - 'for getting content MD5.' -_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \ - 'Getting content MD5 for a range greater than 4MB ' + \ - 'is not supported.' -_ERROR_MD5_MISMATCH = \ - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.' -_ERROR_TOO_MANY_ACCESS_POLICIES = \ - 'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' -_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \ - 'Encryption version is not supported.' -_ERROR_DECRYPTION_FAILURE = \ - 'Decryption failed' -_ERROR_ENCRYPTION_REQUIRED = \ - 'Encryption required but no key was provided.' -_ERROR_DECRYPTION_REQUIRED = \ - 'Decryption required but neither key nor resolver was provided.' + \ - ' If you do not want to decypt, please do not set the require encryption flag.' -_ERROR_INVALID_KID = \ - 'Provided or resolved key-encryption-key does not match the id of key used to encrypt.' -_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \ - 'Specified encryption algorithm is not supported.' -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \ - ' for this method.' -_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.' -_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.' - - -def _dont_fail_on_exist(error): - ''' don't throw exception if the resource exists. - This is called by create_* APIs with fail_on_exist=False''' - if isinstance(error, AzureConflictHttpError): - return False - else: - raise error - - -def _dont_fail_not_exist(error): - ''' don't throw exception if the resource doesn't exist. 
- This is called by create_* APIs with fail_on_exist=False''' - if isinstance(error, AzureMissingResourceHttpError): - return False - else: - raise error - - -def _http_error_handler(http_error): - ''' Simple error handler for azure.''' - message = str(http_error) - error_code = None - - if 'x-ms-error-code' in http_error.respheader: - error_code = http_error.respheader['x-ms-error-code'] - message += ' ErrorCode: ' + error_code - - if http_error.respbody is not None: - message += '\n' + http_error.respbody.decode('utf-8-sig') - - ex = AzureHttpError(message, http_error.status) - ex.error_code = error_code - - raise ex - - -def _validate_type_bytes(param_name, param): - if not isinstance(param, bytes): - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) - - -def _validate_type_bytes_or_stream(param_name, param): - if not (isinstance(param, bytes) or hasattr(param, 'read')): - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name)) - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError(_ERROR_VALUE_NONE.format(param_name)) - - -def _validate_content_match(server_md5, computed_md5): - if server_md5 != computed_md5: - raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5)) - - -def _validate_access_policies(identifiers): - if identifiers and len(identifiers) > 5: - raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -def _validate_key_encryption_key_unwrap(kek): - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - - -def _validate_encryption_required(require_encryption, kek): - if require_encryption and (kek is None): - raise ValueError(_ERROR_ENCRYPTION_REQUIRED) - - -def _validate_decryption_required(require_encryption, kek, resolver): - if (require_encryption and (kek is None) and - (resolver is None)): - raise ValueError(_ERROR_DECRYPTION_REQUIRED) - - -def _validate_encryption_protocol_version(encryption_protocol): - if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) - - -def _validate_kek_id(kid, resolved_id): - if not (kid == resolved_id): - raise ValueError(_ERROR_INVALID_KID) - - -def _validate_encryption_unsupported(require_encryption, key_encryption_key): - if require_encryption or (key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) diff --git a/azure/multiapi/storage/v2018_03_28/common/_http/__init__.py b/azure/multiapi/storage/v2018_03_28/common/_http/__init__.py deleted file mode 100644 index 2990ec8..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_http/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -# 
------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - - -class HTTPError(Exception): - ''' - Represents an HTTP Exception when response status code >= 300. - - :ivar int status: - the status code of the response - :ivar str message: - the message - :ivar list headers: - the returned headers, as a list of (name, value) pairs - :ivar bytes body: - the body of the response - ''' - - def __init__(self, status, message, respheader, respbody): - self.status = status - self.respheader = respheader - self.respbody = respbody - Exception.__init__(self, message) - - -class HTTPResponse(object): - ''' - Represents a response from an HTTP request. - - :ivar int status: - the status code of the response - :ivar str message: - the message - :ivar dict headers: - the returned headers - :ivar bytes body: - the body of the response - ''' - - def __init__(self, status, message, headers, body): - self.status = status - self.message = message - self.headers = headers - self.body = body - - -class HTTPRequest(object): - ''' - Represents an HTTP Request. - - :ivar str host: - the host name to connect to - :ivar str method: - the method to use to connect (string such as GET, POST, PUT, etc.) - :ivar str path: - the uri fragment - :ivar dict query: - query parameters - :ivar dict headers: - header values - :ivar bytes body: - the body of the request. - ''' - - def __init__(self): - self.host = '' - self.method = '' - self.path = '' - self.query = {} # list of (name, value) - self.headers = {} # list of (header name, header value) - self.body = '' diff --git a/azure/multiapi/storage/v2018_03_28/common/_http/httpclient.py b/azure/multiapi/storage/v2018_03_28/common/_http/httpclient.py deleted file mode 100644 index 6b104a1..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_http/httpclient.py +++ /dev/null @@ -1,107 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -from . import HTTPResponse -from .._serialization import _get_data_bytes_or_stream_only -logger = logging.getLogger(__name__) - - -class _HTTPClient(object): - ''' - Takes the request and sends it to cloud service and returns the response. - ''' - - def __init__(self, protocol=None, session=None, timeout=None): - ''' - :param str protocol: - http or https. - :param requests.Session session: - session object created with requests library (or compatible). - :param int timeout: - timeout for the http request, in seconds. - ''' - self.protocol = protocol - self.session = session - self.timeout = timeout - - # By default, requests adds an Accept:*/* and Accept-Encoding to the session, - # which causes issues with some Azure REST APIs. Removing these here gives us - # the flexibility to add it back on a case by case basis. 
- if 'Accept' in self.session.headers: - del self.session.headers['Accept'] - - if 'Accept-Encoding' in self.session.headers: - del self.session.headers['Accept-Encoding'] - - self.proxies = None - - def set_proxy(self, host, port, user, password): - ''' - Sets the proxy server host and port for the HTTP CONNECT Tunnelling. - - Note that we set the proxies directly on the request later on rather than - using the session object as requests has a bug where session proxy is ignored - in favor of environment proxy. So, auth will not work unless it is passed - directly when making the request as this overrides both. - - :param str host: - Address of the proxy. Ex: '192.168.0.100' - :param int port: - Port of the proxy. Ex: 6000 - :param str user: - User for proxy authorization. - :param str password: - Password for proxy authorization. - ''' - if user and password: - proxy_string = '{}:{}@{}:{}'.format(user, password, host, port) - else: - proxy_string = '{}:{}'.format(host, port) - - self.proxies = {'http': 'http://{}'.format(proxy_string), - 'https': 'https://{}'.format(proxy_string)} - - def perform_request(self, request): - ''' - Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. If - the response code indicates an error, raise an HTTPError. - - :param HTTPRequest request: - The request to serialize and send. - :return: An HTTPResponse containing the parsed HTTP response. - :rtype: :class:`~._http.HTTPResponse` - ''' - # Verify the body is in bytes or either a file-like/stream object - if request.body: - request.body = _get_data_bytes_or_stream_only('request.body', request.body) - - # Construct the URI - uri = self.protocol.lower() + '://' + request.host + request.path - - # Send the request - response = self.session.request(request.method, - uri, - params=request.query, - headers=request.headers, - data=request.body or None, - timeout=self.timeout, - proxies=self.proxies) - - # Parse the response - status = int(response.status_code) - response_headers = {} - for key, name in response.headers.items(): - # Preserve the case of metadata - if key.lower().startswith('x-ms-meta-'): - response_headers[key] = name - else: - response_headers[key.lower()] = name - - wrap = HTTPResponse(status, response.reason, response_headers, response.content) - response.close() - - return wrap diff --git a/azure/multiapi/storage/v2018_03_28/common/_serialization.py b/azure/multiapi/storage/v2018_03_28/common/_serialization.py deleted file mode 100644 index af27ce5..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/_serialization.py +++ /dev/null @@ -1,371 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -import sys -import uuid -from datetime import date -from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation) -from os import fstat -from time import time -from wsgiref.handlers import format_date_time - -from dateutil.tz import tzutc - -if sys.version_info >= (3,): - from urllib.parse import quote as url_quote -else: - from urllib2 import quote as url_quote - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from ._error import ( - _ERROR_VALUE_SHOULD_BE_BYTES, - _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, - _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM -) -from .models import ( - _unicode_type, -) -from ._common_conversion import ( - _str, -) - - -def _to_utc_datetime(value): - # Azure expects the date value passed in to be UTC. - # Azure will always return values as UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - if value.tzinfo: - value = value.astimezone(tzutc()) - return value.strftime('%Y-%m-%dT%H:%M:%SZ') - - -def _update_request(request, x_ms_version, user_agent_string): - # Verify body - if request.body: - request.body = _get_data_bytes_or_stream_only('request.body', request.body) - length = _len_plus(request.body) - - # only scenario where this case is plausible is if the stream object is not seekable. - if length is None: - raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM) - - # if it is PUT, POST, MERGE, DELETE, need to add content-length to header. - if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: - request.headers['Content-Length'] = str(length) - - # append addtional headers based on the service - request.headers['x-ms-version'] = x_ms_version - request.headers['User-Agent'] = user_agent_string - request.headers['x-ms-client-request-id'] = str(uuid.uuid1()) - - # If the host has a path component (ex local storage), move it - path = request.host.split('/', 1) - if len(path) == 2: - request.host = path[0] - request.path = '/{}{}'.format(path[1], request.path) - - # Encode and optionally add local storage prefix to path - request.path = url_quote(request.path, '/()$=\',~') - - -def _add_metadata_headers(metadata, request): - if metadata: - if not request.headers: - request.headers = {} - for name, value in metadata.items(): - request.headers['x-ms-meta-' + name] = value - - -def _add_date_header(request): - current_time = format_date_time(time()) - request.headers['x-ms-date'] = current_time - - -def _get_data_bytes_only(param_name, param_value): - '''Validates the request body passed in and converts it to bytes - if our policy allows it.''' - if param_value is None: - return b'' - - if isinstance(param_value, bytes): - return param_value - - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) - - -def _get_data_bytes_or_stream_only(param_name, param_value): - '''Validates the request body passed in is a stream/file-like or bytes - object.''' - if param_value is None: - return b'' - - if isinstance(param_value, bytes) or hasattr(param_value, 'read'): - return param_value - - raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name)) - - -def _get_request_body(request_body): - '''Converts an object into a request body. If it's None - we'll return an empty string, if it's one of our objects it'll - convert it to XML and return it. 
Otherwise we just use the object - directly''' - if request_body is None: - return b'' - - if isinstance(request_body, bytes) or isinstance(request_body, IOBase): - return request_body - - if isinstance(request_body, _unicode_type): - return request_body.encode('utf-8') - - request_body = str(request_body) - if isinstance(request_body, _unicode_type): - return request_body.encode('utf-8') - - return request_body - - -def _convert_signed_identifiers_to_xml(signed_identifiers): - if signed_identifiers is None: - return '' - - sis = ETree.Element('SignedIdentifiers') - for id, access_policy in signed_identifiers.items(): - # Root signed identifers element - si = ETree.SubElement(sis, 'SignedIdentifier') - - # Id element - ETree.SubElement(si, 'Id').text = id - - # Access policy element - policy = ETree.SubElement(si, 'AccessPolicy') - - if access_policy.start: - start = access_policy.start - if isinstance(access_policy.start, date): - start = _to_utc_datetime(start) - ETree.SubElement(policy, 'Start').text = start - - if access_policy.expiry: - expiry = access_policy.expiry - if isinstance(access_policy.expiry, date): - expiry = _to_utc_datetime(expiry) - ETree.SubElement(policy, 'Expiry').text = expiry - - if access_policy.permission: - ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission) - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - return output - - -def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, - cors, target_version=None, delete_retention_policy=None, static_website=None): - ''' - - - - version-number - true|false - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - version-number - true|false - true|false - - true|false - number-of-days - - - - - comma-separated-list-of-allowed-origins - comma-separated-list-of-HTTP-verb - max-caching-age-in-seconds - comma-seperated-list-of-response-headers - comma-seperated-list-of-request-headers - - - - true|false - number-of-days - - - true|false - - - - - ''' - service_properties_element = ETree.Element('StorageServiceProperties') - - # Logging - if logging: - logging_element = ETree.SubElement(service_properties_element, 'Logging') - ETree.SubElement(logging_element, 'Version').text = logging.version - ETree.SubElement(logging_element, 'Delete').text = str(logging.delete) - ETree.SubElement(logging_element, 'Read').text = str(logging.read) - ETree.SubElement(logging_element, 'Write').text = str(logging.write) - - retention_element = ETree.SubElement(logging_element, 'RetentionPolicy') - _convert_retention_policy_to_xml(logging.retention_policy, retention_element) - - # HourMetrics - if hour_metrics: - hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics') - _convert_metrics_to_xml(hour_metrics, hour_metrics_element) - - # MinuteMetrics - if minute_metrics: - minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics') - _convert_metrics_to_xml(minute_metrics, minute_metrics_element) - - # CORS - # Make sure to still serialize empty list - if cors is not None: - cors_element = ETree.SubElement(service_properties_element, 'Cors') - for rule in cors: - cors_rule = ETree.SubElement(cors_element, 'CorsRule') - ETree.SubElement(cors_rule, 'AllowedOrigins').text 
= ",".join(rule.allowed_origins) - ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods) - ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds) - ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers) - ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers) - - # Target version - if target_version: - ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version - - # DeleteRetentionPolicy - if delete_retention_policy: - policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy') - ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled) - - if delete_retention_policy.enabled: - ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days) - - # StaticWebsite - if static_website: - static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite') - ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled) - - if static_website.enabled: - - if static_website.index_document is not None: - ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document) - - if static_website.error_document_404_path is not None: - ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \ - str(static_website.error_document_404_path) - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8', - method='xml') - except: - raise - finally: - output = stream.getvalue() - stream.close() - - return output - - -def _convert_metrics_to_xml(metrics, root): - ''' - version-number - true|false - true|false - - true|false - number-of-days - - ''' - # Version - ETree.SubElement(root, 'Version').text = metrics.version - - # Enabled - ETree.SubElement(root, 'Enabled').text = str(metrics.enabled) - - # IncludeAPIs - if metrics.enabled and metrics.include_apis is not None: - ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis) - - # RetentionPolicy - retention_element = ETree.SubElement(root, 'RetentionPolicy') - _convert_retention_policy_to_xml(metrics.retention_policy, retention_element) - - -def _convert_retention_policy_to_xml(retention_policy, root): - ''' - true|false - number-of-days - ''' - # Enabled - ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled) - - # Days - if retention_policy.enabled and retention_policy.days: - ETree.SubElement(root, 'Days').text = str(retention_policy.days) - - -def _len_plus(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - return fstat(fileno).st_size - - # If the stream is seekable and tell() is implemented, calculate the stream size. 
- try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length diff --git a/azure/multiapi/storage/v2018_03_28/common/cloudstorageaccount.py b/azure/multiapi/storage/v2018_03_28/common/cloudstorageaccount.py deleted file mode 100644 index 18cdf7e..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/cloudstorageaccount.py +++ /dev/null @@ -1,188 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -# Note that we import BlobService/QueueService/FileService on demand -# because this module is imported by azure/storage/__init__ -# ie. we don't want 'import azure.storage' to trigger an automatic import -# of blob/queue/file packages. - -from ._error import _validate_not_none -from .models import ( - ResourceTypes, - Services, - AccountPermissions, -) -from .sharedaccesssignature import ( - SharedAccessSignature, -) - - -class CloudStorageAccount(object): - """ - Provides a factory for creating the blob, queue, and file services - with a common account name and account key or sas token. Users can either - use the factory or can construct the appropriate service directly. - """ - - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless is_emulated is used. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters. - ''' - self.account_name = account_name - self.account_key = account_key - self.sas_token = sas_token - self.is_emulated = is_emulated - - def create_block_blob_service(self): - ''' - Creates a BlockBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~..blob.blockblobservice.BlockBlobService` - ''' - try: - from ..blob.blockblobservice import BlockBlobService - return BlockBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - except ImportError: - raise Exception('The package azure-storage-blob is required. ' - + 'Please install it using "pip install azure-storage-blob"') - - def create_page_blob_service(self): - ''' - Creates a PageBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~..blob.pageblobservice.PageBlobService` - ''' - try: - from ..blob.pageblobservice import PageBlobService - return PageBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - except ImportError: - raise Exception('The package azure-storage-blob is required. 
' - + 'Please install it using "pip install azure-storage-blob"') - - def create_append_blob_service(self): - ''' - Creates a AppendBlobService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~..blob.appendblobservice.AppendBlobService` - ''' - try: - from ..blob.appendblobservice import AppendBlobService - return AppendBlobService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - except ImportError: - raise Exception('The package azure-storage-blob is required. ' - + 'Please install it using "pip install azure-storage-blob"') - - def create_queue_service(self): - ''' - Creates a QueueService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~..queue.queueservice.QueueService` - ''' - try: - from ..queue.queueservice import QueueService - return QueueService(self.account_name, self.account_key, - sas_token=self.sas_token, - is_emulated=self.is_emulated) - except ImportError: - raise Exception('The package azure-storage-queue is required. ' - + 'Please install it using "pip install azure-storage-queue"') - - def create_file_service(self): - ''' - Creates a FileService object with the settings specified in the - CloudStorageAccount. - - :return: A service object. - :rtype: :class:`~..file.fileservice.FileService` - ''' - try: - from ..file.fileservice import FileService - return FileService(self.account_name, self.account_key, - sas_token=self.sas_token) - except ImportError: - raise Exception('The package azure-storage-file is required. ' - + 'Please install it using "pip install azure-storage-file"') - - def generate_shared_access_signature(self, services, resource_types, - permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param Services services: - Specifies the services accessible with the account SAS. You can - combine values to provide access to more than one service. - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. 
- :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = SharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(services, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) diff --git a/azure/multiapi/storage/v2018_03_28/common/models.py b/azure/multiapi/storage/v2018_03_28/common/models.py deleted file mode 100644 index 202cc94..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/models.py +++ /dev/null @@ -1,672 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys - -if sys.version_info < (3,): - from collections import Iterable - - _unicode_type = unicode -else: - from collections.abc import Iterable - - _unicode_type = str - -from ._error import ( - _validate_not_none -) - - -class _HeaderDict(dict): - def __getitem__(self, index): - return super(_HeaderDict, self).__getitem__(index.lower()) - - -class _list(list): - '''Used so that additional properties can be set on the return list''' - pass - - -class _dict(dict): - '''Used so that additional properties can be set on the return dictionary''' - pass - - -class _OperationContext(object): - ''' - Contains information that lasts the lifetime of an operation. This operation - may span multiple calls to the Azure service. - - :ivar bool location_lock: - Whether the location should be locked for this operation. - :ivar str location: - The location to lock to. - ''' - - def __init__(self, location_lock=False): - self.location_lock = location_lock - self.host_location = None - - -class ListGenerator(Iterable): - ''' - A generator object used to list storage resources. The generator will lazily - follow the continuation tokens returned by the service and stop when all - resources have been returned or max_results is reached. - - If max_results is specified and the account has more than that number of - resources, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. 
- ''' - - def __init__(self, resources, list_method, list_args, list_kwargs): - self.items = resources - self.next_marker = resources.next_marker - - self._list_method = list_method - self._list_args = list_args - self._list_kwargs = list_kwargs - - def __iter__(self): - # return results - for i in self.items: - yield i - - while True: - # if no more results on the service, return - if not self.next_marker: - break - - # update the marker args - self._list_kwargs['marker'] = self.next_marker - - # handle max results, if present - max_results = self._list_kwargs.get('max_results') - if max_results is not None: - max_results = max_results - len(self.items) - - # if we've reached max_results, return - # else, update the max_results arg - if max_results <= 0: - break - else: - self._list_kwargs['max_results'] = max_results - - # get the next segment - resources = self._list_method(*self._list_args, **self._list_kwargs) - self.items = resources - self.next_marker = resources.next_marker - - # return results - for i in self.items: - yield i - - -class RetryContext(object): - ''' - Contains the request and response information that can be used to determine - whether and how to retry. This context is stored across retries and may be - used to store other information relevant to the retry strategy. - - :ivar ~._http.HTTPRequest request: - The request sent to the storage service. - :ivar ~._http.HTTPResponse response: - The response returned by the storage service. - :ivar LocationMode location_mode: - The location the request was sent to. - :ivar Exception exception: - The exception that just occurred. The type could either be AzureException (for HTTP errors), - or other Exception types from lower layers, which are kept unwrapped for easier processing. - :ivar bool is_emulated: - Whether retry is targeting the emulator. The default value is False. - :ivar int body_position: - The initial position of the body stream. It is useful when retries happen and we need to rewind the stream. - ''' - - def __init__(self): - self.request = None - self.response = None - self.location_mode = None - self.exception = None - self.is_emulated = False - self.body_position = None - - -class LocationMode(object): - ''' - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - ''' - - PRIMARY = 'primary' - ''' Requests should be sent to the primary location. ''' - - SECONDARY = 'secondary' - ''' Requests should be sent to the secondary location, if possible. ''' - - -class RetentionPolicy(object): - ''' - By default, Storage Analytics will not delete any logging or metrics data. Blobs - will continue to be written until the shared 20TB limit is - reached. Once the 20TB limit is reached, Storage Analytics will stop writing - new data and will not resume until free space is available. This 20TB limit - is independent of the total limit for your storage account. - - There are two ways to delete Storage Analytics data: by manually making deletion - requests or by setting a data retention policy. Manual requests to delete Storage - Analytics data are billable, but delete requests resulting from a retention policy - are not billable. - ''' - - def __init__(self, enabled=False, days=None): - ''' - :param bool enabled: - Indicates whether a retention policy is enabled for the - storage service. If disabled, logging and metrics data will be retained - infinitely by the service unless explicitly deleted. 
- :param int days: - Required if enabled is true. Indicates the number of - days that metrics or logging data should be retained. All data older - than this value will be deleted. The minimum value you can specify is 1; - the largest value is 365 (one year). - ''' - _validate_not_none("enabled", enabled) - if enabled: - _validate_not_none("days", days) - - self.enabled = enabled - self.days = days - - -class Logging(object): - ''' - Storage Analytics logs detailed information about successful and failed requests - to a storage service. This information can be used to monitor individual requests - and to diagnose issues with a storage service. Requests are logged on a best-effort - basis. - - All logs are stored in block blobs in a container named $logs, which is - automatically created when Storage Analytics is enabled for a storage account. - The $logs container is located in the blob namespace of the storage account. - This container cannot be deleted once Storage Analytics has been enabled, though - its contents can be deleted. - - For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx - ''' - - def __init__(self, delete=False, read=False, write=False, - retention_policy=None): - ''' - :param bool delete: - Indicates whether all delete requests should be logged. - :param bool read: - Indicates whether all read requests should be logged. - :param bool write: - Indicates whether all write requests should be logged. - :param RetentionPolicy retention_policy: - The retention policy for the metrics. - ''' - _validate_not_none("read", read) - _validate_not_none("write", write) - _validate_not_none("delete", delete) - - self.version = u'1.0' - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy if retention_policy else RetentionPolicy() - - -class Metrics(object): - ''' - Metrics include aggregated transaction statistics and capacity data about requests - to a storage service. Transactions are reported at both the API operation level - as well as at the storage service level, and capacity is reported at the storage - service level. Metrics data can be used to analyze storage service usage, diagnose - issues with requests made against the storage service, and to improve the - performance of applications that use a service. - - For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx - ''' - - def __init__(self, enabled=False, include_apis=None, - retention_policy=None): - ''' - :param bool enabled: - Indicates whether metrics are enabled for - the service. - :param bool include_apis: - Required if enabled is True. Indicates whether metrics - should generate summary statistics for called API operations. - :param RetentionPolicy retention_policy: - The retention policy for the metrics. - ''' - _validate_not_none("enabled", enabled) - if enabled: - _validate_not_none("include_apis", include_apis) - - self.version = u'1.0' - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy if retention_policy else RetentionPolicy() - - -class CorsRule(object): - ''' - CORS is an HTTP feature that enables a web application running under one domain - to access resources in another domain. Web browsers implement a security - restriction known as same-origin policy that prevents a web page from calling - APIs in a different domain; CORS provides a secure way to allow one domain - (the origin domain) to call APIs in another domain. 
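# A hedged sketch of how the analytics settings defined above (RetentionPolicy,
# Logging, Metrics) would be combined under the v2018_03_28 layout this change
# removes; the set_blob_service_properties call is a hypothetical service
# method, shown only to indicate where these objects would be passed.
from azure.multiapi.storage.v2018_03_28.common.models import (
    Logging, Metrics, RetentionPolicy,
)

seven_days = RetentionPolicy(enabled=True, days=7)
request_logging = Logging(delete=True, read=True, write=True,
                          retention_policy=seven_days)
hour_metrics = Metrics(enabled=True, include_apis=True,
                       retention_policy=seven_days)

# blob_service.set_blob_service_properties(logging=request_logging,
#                                          hour_metrics=hour_metrics)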
- - For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx - ''' - - def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0, - exposed_headers=None, allowed_headers=None): - ''' - :param allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :type allowed_origins: list(str) - :param allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :type allowed_methods: list(str) - :param int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - :param exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :type exposed_headers: list(str) - :param allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :type allowed_headers: list(str) - ''' - _validate_not_none("allowed_origins", allowed_origins) - _validate_not_none("allowed_methods", allowed_methods) - _validate_not_none("max_age_in_seconds", max_age_in_seconds) - - self.allowed_origins = allowed_origins if allowed_origins else list() - self.allowed_methods = allowed_methods if allowed_methods else list() - self.max_age_in_seconds = max_age_in_seconds - self.exposed_headers = exposed_headers if exposed_headers else list() - self.allowed_headers = allowed_headers if allowed_headers else list() - - -class DeleteRetentionPolicy(object): - ''' - To set DeleteRetentionPolicy, you must call Set Blob Service Properties using version 2017-07-29 or later. - This class groups the settings related to delete retention policy. - ''' - - def __init__(self, enabled=False, days=None): - ''' - :param bool enabled: - Required. Indicates whether a deleted blob or snapshot is retained or immediately removed by delete operation. - :param int days: - Required only if Enabled is true. Indicates the number of days that deleted blob be retained. - All data older than this value will be permanently deleted. - The minimum value you can specify is 1; the largest value is 365. - ''' - _validate_not_none("enabled", enabled) - if enabled: - _validate_not_none("days", days) - - self.enabled = enabled - self.days = days - - -class StaticWebsite(object): - ''' - Class representing the service properties pertaining to static websites. - To set StaticWebsite, you must call Set Blob Service Properties using version 2018-03-28 or later. - ''' - - def __init__(self, enabled=False, index_document=None, error_document_404_path=None): - ''' - :param bool enabled: - Required. True if static websites should be enabled on the blob service for the corresponding Storage Account. - :param str index_document: - Represents the name of the index document. This is commonly "index.html". - :param str error_document_404_path: - Represents the path to the error document that should be shown when an error 404 is issued, - in other words, when a browser requests a page that does not exist. 
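# A minimal sketch, again assuming the v2018_03_28 layout being removed here,
# of constructing the blob-service settings defined above; the origin, paths
# and day counts are placeholder values, and applying the objects to the
# service is omitted.
from azure.multiapi.storage.v2018_03_28.common.models import (
    CorsRule, DeleteRetentionPolicy, StaticWebsite,
)

cors_rule = CorsRule(
    allowed_origins=['https://contoso.example'],
    allowed_methods=['GET', 'PUT'],
    max_age_in_seconds=300,
    allowed_headers=['x-ms-meta-*'],
    exposed_headers=['x-ms-meta-*'],
)
soft_delete = DeleteRetentionPolicy(enabled=True, days=14)
website = StaticWebsite(enabled=True, index_document='index.html',
                        error_document_404_path='404.html')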
- ''' - _validate_not_none("enabled", enabled) - - self.enabled = enabled - self.index_document = index_document - self.error_document_404_path = error_document_404_path - - -class ServiceProperties(object): - ''' - Returned by get_*_service_properties functions. Contains the properties of a - storage service, including Analytics and CORS rules. - - Azure Storage Analytics performs logging and provides metrics data for a storage - account. You can use this data to trace requests, analyze usage trends, and - diagnose issues with your storage account. To use Storage Analytics, you must - enable it individually for each service you want to monitor. - - The aggregated data is stored in a well-known blob (for logging) and in well-known - tables (for metrics), which may be accessed using the Blob service and Table - service APIs. - - For an in-depth guide on using Storage Analytics and other tools to identify, - diagnose, and troubleshoot Azure Storage-related issues, see - http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/ - - For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx - ''' - - pass - - -class ServiceStats(object): - ''' - Returned by get_*_service_stats functions. Contains statistics related to - replication for the given service. It is only available when read-access - geo-redundant replication is enabled for the storage account. - - :ivar GeoReplication geo_replication: - An object containing statistics related to replication for the given service. - ''' - pass - - -class GeoReplication(object): - ''' - Contains statistics related to replication for the given service. - - :ivar str status: - The status of the secondary location. Possible values are: - live: Indicates that the secondary location is active and operational. - bootstrap: Indicates initial synchronization from the primary location - to the secondary location is in progress. This typically occurs - when replication is first enabled. - unavailable: Indicates that the secondary location is temporarily - unavailable. - :ivar date last_sync_time: - A GMT date value, to the second. All primary writes preceding this value - are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for - reads. The value may be empty if LastSyncTime is not available. This can - happen if the replication status is bootstrap or unavailable. Although - geo-replication is continuously enabled, the LastSyncTime result may - reflect a cached value from the service that is refreshed every few minutes. - ''' - pass - - -class AccessPolicy(object): - ''' - Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. 
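# A minimal sketch of the stored-access-policy idea described above: start,
# expiry and permission live on the policy rather than in the SAS URL, so the
# signature can later be revoked by removing the policy. The set_container_acl
# call is a hypothetical service method included only for orientation.
from datetime import datetime, timedelta
from azure.multiapi.storage.v2018_03_28.common.models import AccessPolicy

read_only_policy = AccessPolicy(
    permission='r',
    start=datetime.utcnow(),
    expiry=datetime.utcnow() + timedelta(hours=1),
)
# blob_service.set_container_acl('my-container', {'read-only-1h': read_only_policy})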
- - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - ''' - - def __init__(self, permission=None, expiry=None, start=None): - ''' - :param str permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - ''' - self.start = start - self.expiry = expiry - self.permission = permission - - -class Protocol(object): - ''' - Specifies the protocol permitted for a SAS token. Note that HTTP only is - not allowed. - ''' - - HTTPS = 'https' - ''' Allow HTTPS requests only. ''' - - HTTPS_HTTP = 'https,http' - ''' Allow HTTP and HTTPS requests. ''' - - -class ResourceTypes(object): - ''' - Specifies the resource types that are accessible with the account SAS. - - :ivar ResourceTypes ResourceTypes.CONTAINER: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :ivar ResourceTypes ResourceTypes.OBJECT: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - :ivar ResourceTypes ResourceTypes.SERVICE: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - ''' - - def __init__(self, service=False, container=False, object=False, _str=None): - ''' - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - :param str _str: - A string representing the resource types. 
- ''' - if not _str: - _str = '' - self.service = service or ('s' in _str) - self.container = container or ('c' in _str) - self.object = object or ('o' in _str) - - def __or__(self, other): - return ResourceTypes(_str=str(self) + str(other)) - - def __add__(self, other): - return ResourceTypes(_str=str(self) + str(other)) - - def __str__(self): - return (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - -ResourceTypes.SERVICE = ResourceTypes(service=True) -ResourceTypes.CONTAINER = ResourceTypes(container=True) -ResourceTypes.OBJECT = ResourceTypes(object=True) - - -class Services(object): - ''' - Specifies the services accessible with the account SAS. - - :ivar Services Services.BLOB: The blob service. - :ivar Services Services.FILE: The file service - :ivar Services Services.QUEUE: The queue service. - :ivar Services Services.TABLE: The table service. - ''' - - def __init__(self, blob=False, queue=False, file=False, table=False, _str=None): - ''' - :param bool blob: - Access to any blob service, for example, the `.BlockBlobService` - :param bool queue: - Access to the `.QueueService` - :param bool file: - Access to the `.FileService` - :param bool table: - Access to the TableService - :param str _str: - A string representing the services. - ''' - if not _str: - _str = '' - self.blob = blob or ('b' in _str) - self.queue = queue or ('q' in _str) - self.file = file or ('f' in _str) - self.table = table or ('t' in _str) - - def __or__(self, other): - return Services(_str=str(self) + str(other)) - - def __add__(self, other): - return Services(_str=str(self) + str(other)) - - def __str__(self): - return (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('t' if self.table else '') + - ('f' if self.file else '')) - - -Services.BLOB = Services(blob=True) -Services.QUEUE = Services(queue=True) -Services.TABLE = Services(table=True) -Services.FILE = Services(file=True) - - -class AccountPermissions(object): - ''' - :class:`~ResourceTypes` class to be used with generate_shared_access_signature - method and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :ivar AccountPermissions AccountPermissions.ADD: - Valid for the following Object resource types only: queue messages and append blobs. - :ivar AccountPermissions AccountPermissions.CREATE: - Valid for the following Object resource types only: blobs and files. Users - can create new blobs or files, but may not overwrite existing blobs or files. - :ivar AccountPermissions AccountPermissions.DELETE: - Valid for Container and Object resource types, except for queue messages. - :ivar AccountPermissions AccountPermissions.LIST: - Valid for Service and Container resource types only. - :ivar AccountPermissions AccountPermissions.PROCESS: - Valid for the following Object resource type only: queue messages. - :ivar AccountPermissions AccountPermissions.READ: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :ivar AccountPermissions AccountPermissions.UPDATE: - Valid for the following Object resource types only: queue messages. 
- :ivar AccountPermissions AccountPermissions.WRITE: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - ''' - - def __init__(self, read=False, write=False, delete=False, list=False, - add=False, create=False, update=False, process=False, _str=None): - ''' - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :param str _str: - A string representing the permissions. - ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - self.add = add or ('a' in _str) - self.create = create or ('c' in _str) - self.update = update or ('u' in _str) - self.process = process or ('p' in _str) - - def __or__(self, other): - return AccountPermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return AccountPermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '')) - - -AccountPermissions.READ = AccountPermissions(read=True) -AccountPermissions.WRITE = AccountPermissions(write=True) -AccountPermissions.DELETE = AccountPermissions(delete=True) -AccountPermissions.LIST = AccountPermissions(list=True) -AccountPermissions.ADD = AccountPermissions(add=True) -AccountPermissions.CREATE = AccountPermissions(create=True) -AccountPermissions.UPDATE = AccountPermissions(update=True) -AccountPermissions.PROCESS = AccountPermissions(process=True) diff --git a/azure/multiapi/storage/v2018_03_28/common/retry.py b/azure/multiapi/storage/v2018_03_28/common/retry.py deleted file mode 100644 index 480b2a8..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/retry.py +++ /dev/null @@ -1,306 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from abc import ABCMeta -from math import pow -import random -from io import (SEEK_SET, UnsupportedOperation) - -from .models import LocationMode -from ._constants import ( - DEV_ACCOUNT_NAME, - DEV_ACCOUNT_SECONDARY_NAME -) - - -class _Retry(object): - ''' - The base class for Exponential and Linear retries containing shared code. 
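# Pulling the flag classes above together: a hedged example of composing
# Services, ResourceTypes and AccountPermissions and handing them to the
# SharedAccessSignature factory defined later in this change (v2018_03_28
# layout); the account name and key are placeholders.
from datetime import datetime, timedelta
from azure.multiapi.storage.v2018_03_28.common.models import (
    AccountPermissions, ResourceTypes, Services,
)
from azure.multiapi.storage.v2018_03_28.common.sharedaccesssignature import (
    SharedAccessSignature,
)

sas = SharedAccessSignature('myaccount', 'bXlrZXk=')
token = sas.generate_account(
    services=Services.BLOB | Services.QUEUE,
    resource_types=ResourceTypes.SERVICE | ResourceTypes.CONTAINER,
    permission=AccountPermissions.READ | AccountPermissions.LIST,
    expiry=datetime.utcnow() + timedelta(hours=1),
    protocol='https',
)
# token is a query string along the lines of
# 'ss=bq&srt=sc&sp=rl&se=...&spr=https&sv=2018-03-28&sig=...'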
- ''' - __metaclass__ = ABCMeta - - def __init__(self, max_attempts, retry_to_secondary): - ''' - Constructs a base retry object. - - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - ''' - self.max_attempts = max_attempts - self.retry_to_secondary = retry_to_secondary - - def _should_retry(self, context): - ''' - A function which determines whether or not to retry. - - :param ~..models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - A boolean indicating whether or not to retry the request. - :rtype: bool - ''' - # If max attempts are reached, do not retry. - if context.count >= self.max_attempts: - return False - - status = None - if context.response and context.response.status: - status = context.response.status - - if status is None: - ''' - If status is None, retry as this request triggered an exception. For - example, network issues would trigger this. - ''' - return True - elif 200 <= status < 300: - ''' - This method is called after a successful response, meaning we failed - during the response body download or parsing. So, success codes should - be retried. - ''' - return True - elif 300 <= status < 500: - ''' - An exception occured, but in most cases it was expected. Examples could - include a 309 Conflict or 412 Precondition Failed. - ''' - if status == 404 and context.location_mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - elif status >= 500: - ''' - Response codes above 500 with the exception of 501 Not Implemented and - 505 Version Not Supported indicate a server issue and should be retried. - ''' - if status == 501 or status == 505: - return False - return True - else: - # If something else happened, it's unexpected. Retry. - return True - - def _set_next_host_location(self, context): - ''' - A function which sets the next host location on the request, if applicable. - - :param ~..models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
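# A condensed, standalone restatement of the _should_retry decision above,
# useful for seeing at a glance which status codes the legacy policies retried.
def should_retry(status, count, max_attempts, used_secondary=False):
    if count >= max_attempts:
        return False
    if status is None:            # an exception fired before any response arrived
        return True
    if 200 <= status < 300:       # failure happened while reading/parsing the body
        return True
    if 300 <= status < 500:       # mostly expected client-side errors
        return status == 408 or (status == 404 and used_secondary)
    if status >= 500:             # server-side problems, except 501/505
        return status not in (501, 505)
    return True                   # anything else is unexpected; retry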
- ''' - if len(context.request.host_locations) > 1: - # If there's more than one possible location, retry to the alternative - if context.location_mode == LocationMode.PRIMARY: - context.location_mode = LocationMode.SECONDARY - - # if targeting the emulator (with path style), change path instead of host - if context.is_emulated: - # replace the first instance of primary account name with the secondary account name - context.request.path = context.request.path.replace(DEV_ACCOUNT_NAME, DEV_ACCOUNT_SECONDARY_NAME, 1) - else: - context.request.host = context.request.host_locations.get(context.location_mode) - else: - context.location_mode = LocationMode.PRIMARY - - # if targeting the emulator (with path style), change path instead of host - if context.is_emulated: - # replace the first instance of secondary account name with the primary account name - context.request.path = context.request.path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1) - else: - context.request.host = context.request.host_locations.get(context.location_mode) - - def _retry(self, context, backoff): - ''' - A function which determines whether and how to retry. - - :param ~..models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :param function() backoff: - A function which returns the backoff time if a retry is to be performed. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - # If the context does not contain a count parameter, this request has not - # been retried yet. Add the count parameter to track the number of retries. - if not hasattr(context, 'count'): - context.count = 0 - - # Determine whether to retry, and if so increment the count, modify the - # request as desired, and return the backoff. - if self._should_retry(context): - backoff_interval = backoff(context) - context.count += 1 - - # If retry to secondary is enabled, attempt to change the host if the - # request allows it - if self.retry_to_secondary: - self._set_next_host_location(context) - - # rewind the request body if it is a stream - if hasattr(context.request.body, 'read'): - # no position was saved, then retry would not work - if context.body_position is None: - return None - else: - try: - # attempt to rewind the body to the initial position - context.request.body.seek(context.body_position, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - return None - - return backoff_interval - - return None - - -class ExponentialRetry(_Retry): - ''' - Exponential retry. - ''' - - def __init__(self, initial_backoff=15, increment_base=3, max_attempts=3, - retry_to_secondary=False, random_jitter_range=3): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. 
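# The back-off arithmetic the ExponentialRetry above describes, written out
# standalone: initial_backoff for the first retry, then
# initial_backoff + increment_base ** retry_count, with +/- jitter applied.
import random

def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
    backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
    low = backoff - jitter if backoff > jitter else 0
    return random.uniform(low, backoff + jitter)

# roughly 15, 18 and 24 seconds (each +/- 3s of jitter) for the first three retries
for attempt in range(3):
    print(round(exponential_backoff(attempt)))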
- :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary) - - ''' - A function which determines whether and how to retry. - - :param ~..models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - - def retry(self, context): - return self._retry(context, self._backoff) - - ''' - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - - def _backoff(self, context): - random_generator = random.Random() - backoff = self.initial_backoff + (0 if context.count == 0 else pow(self.increment_base, context.count)) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(_Retry): - ''' - Linear retry. - ''' - - def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False, random_jitter_range=3): - ''' - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.backoff = backoff - self.max_attempts = max_attempts - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__(max_attempts, retry_to_secondary) - - ''' - A function which determines whether and how to retry. - - :param ~..models.RetryContext context: - The retry context. This contains the request, response, and other data - which can be used to determine whether or not to retry. - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - ''' - - def retry(self, context): - return self._retry(context, self._backoff) - - ''' - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
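# How a policy would be plugged into a client: the StorageClient base class
# further down in this change stores a bound retry function, so swapping
# policies is just a matter of assigning policy.retry. The block_blob_service
# below is a hypothetical, already-constructed service object.
from azure.multiapi.storage.v2018_03_28.common.retry import (
    ExponentialRetry, LinearRetry, no_retry,
)

# block_blob_service.retry = LinearRetry(backoff=5, max_attempts=5).retry
# block_blob_service.retry = ExponentialRetry(initial_backoff=10, increment_base=2).retry
# block_blob_service.retry = no_retry   # fail fast, never retry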
- :rtype: int or None - ''' - - def _backoff(self, context): - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - self.random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0 - self.random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(self.random_range_start, self.random_range_end) - - -def no_retry(context): - ''' - Specifies never to retry. - - :param ~..models.RetryContext context: - The retry context. - :return: - Always returns None to indicate never to retry. - :rtype: None - ''' - return None diff --git a/azure/multiapi/storage/v2018_03_28/common/sharedaccesssignature.py b/azure/multiapi/storage/v2018_03_28/common/sharedaccesssignature.py deleted file mode 100644 index 85669ec..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/sharedaccesssignature.py +++ /dev/null @@ -1,217 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from datetime import date - -from ._common_conversion import ( - _sign_string, - _to_str, -) -from ._constants import DEFAULT_X_MS_VERSION -from ._serialization import ( - url_quote, - _to_utc_datetime, -) - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=DEFAULT_X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param Services services: - Specifies the services accessible with the account SAS. You can - combine values to provide access to more than one service. - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. 
- Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _to_str(val) - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(_QueryStringConstants.SIGNED_START, start) - self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(_QueryStringConstants.SIGNED_IP, ip) - self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(_QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, id): - self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id) - - def add_account(self, services, resource_types): - self._add_query(_QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - 
self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_resource_signature(self, account_name, account_key, service, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/' + service + '/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. - string_to_sign = \ - (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(_QueryStringConstants.SIGNED_START) + - get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(_QueryStringConstants.SIGNED_IP) + - get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) - - if service == 'blob' or service == 'file': - string_to_sign += \ - (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, - _sign_string(account_key, string_to_sign)) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(_QueryStringConstants.SIGNED_START) + - get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(_QueryStringConstants.SIGNED_IP) + - get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) - - self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, - _sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storage/v2018_03_28/common/storageclient.py b/azure/multiapi/storage/v2018_03_28/common/storageclient.py deleted file mode 100644 index 2202e1d..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/storageclient.py +++ /dev/null @@ -1,391 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
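# The essence of the account-signature construction just above, as a
# standalone sketch: concatenate the signed fields in a fixed order, each
# terminated by a newline, sign the result, and join the query parameters into
# a token. The HMAC-SHA256/base64 details live in _sign_string, which is not
# part of this hunk, so treat that portion as an assumption.
import base64
import hashlib
import hmac
from urllib.parse import quote

def make_account_sas(account_name, account_key, fields):
    order = ['sp', 'ss', 'srt', 'st', 'se', 'sip', 'spr', 'sv']
    string_to_sign = account_name + '\n' + ''.join(
        (fields.get(name) or '') + '\n' for name in order)
    digest = hmac.new(base64.b64decode(account_key),
                      string_to_sign.encode('utf-8'),
                      hashlib.sha256).digest()
    fields['sig'] = base64.b64encode(digest).decode('utf-8')
    return '&'.join('{0}={1}'.format(k, quote(v)) for k, v in fields.items() if v)

token = make_account_sas('myaccount', 'bXlrZXk=', {
    'sp': 'rl', 'ss': 'bq', 'srt': 'sc',
    'se': '2020-01-01T00:00:00Z', 'spr': 'https', 'sv': '2018-03-28',
})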
-# -------------------------------------------------------------------------- - -import sys -from abc import ABCMeta -import logging - -logger = logging.getLogger(__name__) -from time import sleep - -import requests -from azure.common import ( - AzureException, - AzureHttpError, -) - -from ._constants import ( - DEFAULT_SOCKET_TIMEOUT, - DEFAULT_X_MS_VERSION, - DEFAULT_USER_AGENT_STRING, - USER_AGENT_STRING_PREFIX, - USER_AGENT_STRING_SUFFIX, -) -from ._error import ( - _ERROR_DECRYPTION_FAILURE, - _http_error_handler, -) -from ._http import HTTPError -from ._http.httpclient import _HTTPClient -from ._serialization import ( - _update_request, - _add_date_header, -) -from .models import ( - RetryContext, - LocationMode, - _OperationContext, -) -from .retry import ExponentialRetry -from io import UnsupportedOperation - - -class StorageClient(object): - ''' - This is the base class for service objects. Service objects are used to do - all requests to Storage. This class cannot be instantiated directly. - - :ivar str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given, or if a custom - domain is used with anonymous authentication. - :ivar str account_key: - The storage account key. This is used for shared key authentication. - If neither account key or sas token is specified, anonymous access - will be used. - :ivar str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. If neither are - specified, anonymous access will be used. - :ivar str primary_endpoint: - The endpoint to send storage requests to. - :ivar str secondary_endpoint: - The secondary endpoint to read storage data from. This will only be a - valid endpoint if the storage account used is RA-GRS and thus allows - reading from secondary. - :ivar function(context) retry: - A function which determines whether to retry. Takes as a parameter a - :class:`~.models.RetryContext` object. Returns the number - of seconds to wait before retrying the request, or None to indicate not - to retry. - :ivar ~.models.LocationMode location_mode: - The host location to use to make requests. Defaults to LocationMode.PRIMARY. - Note that this setting only applies to RA-GRS accounts as other account - types do not allow reading from secondary. If the location_mode is set to - LocationMode.SECONDARY, read requests will be sent to the secondary endpoint. - Write requests will continue to be sent to primary. - :ivar str protocol: - The protocol to use for requests. Defaults to https. - :ivar requests.Session request_session: - The session object to use for http requests. - :ivar function(request) request_callback: - A function called immediately before each request is sent. This function - takes as a parameter the request object and returns nothing. It may be - used to added custom headers or log request data. - :ivar function() response_callback: - A function called immediately after each response is received. This - function takes as a parameter the response object and returns nothing. - It may be used to log response data. - :ivar function() retry_callback: - A function called immediately after retry evaluation is performed. This - function takes as a parameter the retry context object and returns nothing. - It may be used to detect retries and log context information. 
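# The hook points the class docstring above describes, sketched with simple
# logging callbacks; the block_blob_service object is a hypothetical,
# already-constructed service client.
import logging

hook_logger = logging.getLogger('storage.hooks')

def log_request(request):
    hook_logger.info('outgoing %s %s', request.method, request.path)

def log_response(response):
    hook_logger.info('received status %s', response.status)

def log_retry(retry_context):
    hook_logger.warning('retrying, attempt %s', getattr(retry_context, 'count', 0))

# block_blob_service.request_callback = log_request
# block_blob_service.response_callback = log_response
# block_blob_service.retry_callback = log_retry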
- ''' - - __metaclass__ = ABCMeta - - def __init__(self, connection_params): - ''' - :param obj connection_params: The parameters to use to construct the client. - ''' - self.account_name = connection_params.account_name - self.account_key = connection_params.account_key - self.sas_token = connection_params.sas_token - self.token_credential = connection_params.token_credential - self.is_emulated = connection_params.is_emulated - - self.primary_endpoint = connection_params.primary_endpoint - self.secondary_endpoint = connection_params.secondary_endpoint - - protocol = connection_params.protocol - request_session = connection_params.request_session or requests.Session() - socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT - self._httpclient = _HTTPClient( - protocol=protocol, - session=request_session, - timeout=socket_timeout, - ) - - self.retry = ExponentialRetry().retry - self.location_mode = LocationMode.PRIMARY - - self.request_callback = None - self.response_callback = None - self.retry_callback = None - self._X_MS_VERSION = DEFAULT_X_MS_VERSION - self._USER_AGENT_STRING = DEFAULT_USER_AGENT_STRING - - def _update_user_agent_string(self, service_package_version): - self._USER_AGENT_STRING = '{}{} {}'.format(USER_AGENT_STRING_PREFIX, - service_package_version, - USER_AGENT_STRING_SUFFIX) - - @property - def socket_timeout(self): - return self._httpclient.timeout - - @socket_timeout.setter - def socket_timeout(self, value): - self._httpclient.timeout = value - - @property - def protocol(self): - return self._httpclient.protocol - - @protocol.setter - def protocol(self, value): - self._httpclient.protocol = value - - @property - def request_session(self): - return self._httpclient.session - - @request_session.setter - def request_session(self, value): - self._httpclient.session = value - - def set_proxy(self, host, port, user=None, password=None): - ''' - Sets the proxy server host and port for the HTTP CONNECT Tunnelling. - - :param str host: Address of the proxy. Ex: '192.168.0.100' - :param int port: Port of the proxy. Ex: 6000 - :param str user: User for proxy authorization. - :param str password: Password for proxy authorization. - ''' - self._httpclient.set_proxy(host, port, user, password) - - def _get_host_locations(self, primary=True, secondary=False): - locations = {} - if primary: - locations[LocationMode.PRIMARY] = self.primary_endpoint - if secondary: - locations[LocationMode.SECONDARY] = self.secondary_endpoint - return locations - - def _apply_host(self, request, operation_context, retry_context): - if operation_context.location_lock and operation_context.host_location: - # If this is a location locked operation and the location is set, - # override the request location and host_location. - request.host_locations = operation_context.host_location - request.host = list(operation_context.host_location.values())[0] - retry_context.location_mode = list(operation_context.host_location.keys())[0] - elif len(request.host_locations) == 1: - # If only one location is allowed, use that location. - request.host = list(request.host_locations.values())[0] - retry_context.location_mode = list(request.host_locations.keys())[0] - else: - # If multiple locations are possible, choose based on the location mode. 
- request.host = request.host_locations.get(self.location_mode) - retry_context.location_mode = self.location_mode - - @staticmethod - def extract_date_and_request_id(retry_context): - if getattr(retry_context, 'response', None) is None: - return "" - resp = retry_context.response - - if 'date' in resp.headers and 'x-ms-request-id' in resp.headers: - return str.format("Server-Timestamp={0}, Server-Request-ID={1}", - resp.headers['date'], resp.headers['x-ms-request-id']) - elif 'date' in resp.headers: - return str.format("Server-Timestamp={0}", resp.headers['date']) - elif 'x-ms-request-id' in resp.headers: - return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id']) - else: - return "" - - def _perform_request(self, request, parser=None, parser_args=None, operation_context=None, expected_errors=None): - ''' - Sends the request and return response. Catches HTTPError and hands it - to error handler - ''' - operation_context = operation_context or _OperationContext() - retry_context = RetryContext() - retry_context.is_emulated = self.is_emulated - - # if request body is a stream, we need to remember its current position in case retries happen - if hasattr(request.body, 'read'): - try: - retry_context.body_position = request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - - # Apply the appropriate host based on the location mode - self._apply_host(request, operation_context, retry_context) - - # Apply common settings to the request - _update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING) - client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id']) - - while True: - try: - try: - # Execute the request callback - if self.request_callback: - self.request_callback(request) - - # Add date and auth after the callback so date doesn't get too old and - # authentication is still correct if signed headers are added in the request - # callback. This also ensures retry policies with long back offs - # will work as it resets the time sensitive headers. 
- _add_date_header(request) - - try: - # request can be signed individually - self.authentication.sign_request(request) - except AttributeError: - # session can also be signed - self.request_session = self.authentication.signed_session(self.request_session) - - # Set the request context - retry_context.request = request - - # Log the request before it goes out - logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.", - client_request_id_prefix, - request.method, - request.path, - request.query, - str(request.headers).replace('\n', '')) - - # Perform the request - response = self._httpclient.perform_request(request) - - # Execute the response callback - if self.response_callback: - self.response_callback(response) - - # Set the response context - retry_context.response = response - - # Log the response when it comes back - logger.info("%s Receiving Response: " - "%s, HTTP Status Code=%s, Message=%s, Headers=%s.", - client_request_id_prefix, - self.extract_date_and_request_id(retry_context), - response.status, - response.message, - str(response.headers).replace('\n', '')) - - # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException - if response.status >= 300: - # This exception will be caught by the general error handler - # and raised as an azure http exception - _http_error_handler( - HTTPError(response.status, response.message, response.headers, response.body)) - - # Parse the response - if parser: - if parser_args: - args = [response] - args.extend(parser_args) - return parser(*args) - else: - return parser(response) - else: - return - except AzureException as ex: - retry_context.exception = ex - raise ex - except Exception as ex: - retry_context.exception = ex - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - raise AzureException(ex.args[0]) - else: - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - msg = "" - if len(ex.args) > 0: - msg = ex.args[0] - raise AzureException('{}: {}'.format(ex.__class__.__name__, msg)) - - except AzureException as ex: - # only parse the strings used for logging if logging is at least enabled for CRITICAL - if logger.isEnabledFor(logging.CRITICAL): - exception_str_in_one_line = str(ex).replace('\n', '') - status_code = retry_context.response.status if retry_context.response is not None else 'Unknown' - timestamp_and_request_id = self.extract_date_and_request_id(retry_context) - - # if the http error was expected, we should short-circuit - if isinstance(ex, AzureHttpError) and expected_errors is not None and ex.error_code in expected_errors: - logger.info("%s Received expected http error: " - "%s, HTTP status code=%s, Exception=%s.", - client_request_id_prefix, - timestamp_and_request_id, - status_code, - exception_str_in_one_line) - raise ex - - logger.info("%s Operation failed: checking if the operation should be retried. " - "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.", - client_request_id_prefix, - retry_context.count if hasattr(retry_context, 'count') else 0, - timestamp_and_request_id, - status_code, - exception_str_in_one_line) - - # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc) - # will not be resolved with retries. 
- if str(ex) == _ERROR_DECRYPTION_FAILURE: - logger.error("%s Encountered decryption failure: this cannot be retried. " - "%s, HTTP status code=%s, Exception=%s.", - client_request_id_prefix, - timestamp_and_request_id, - status_code, - exception_str_in_one_line) - raise ex - - # Determine whether a retry should be performed and if so, how - # long to wait before performing retry. - retry_interval = self.retry(retry_context) - if retry_interval is not None: - # Execute the callback - if self.retry_callback: - self.retry_callback(retry_context) - - logger.info( - "%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.", - client_request_id_prefix, - retry_context.count, - retry_interval) - - # Sleep for the desired retry interval - sleep(retry_interval) - else: - logger.error("%s Retry policy did not allow for a retry: " - "%s, HTTP status code=%s, Exception=%s.", - client_request_id_prefix, - timestamp_and_request_id, - status_code, - exception_str_in_one_line) - raise ex - finally: - # If this is a location locked operation and the location is not set, - # this is the first request of that operation. Set the location to - # be used for subsequent requests in the operation. - if operation_context.location_lock and not operation_context.host_location: - # note: to cover the emulator scenario, the host_location is grabbed - # from request.host_locations(which includes the dev account name) - # instead of request.host(which at this point no longer includes the dev account name) - operation_context.host_location = { - retry_context.location_mode: request.host_locations[retry_context.location_mode]} diff --git a/azure/multiapi/storage/v2018_03_28/common/tokencredential.py b/azure/multiapi/storage/v2018_03_28/common/tokencredential.py deleted file mode 100644 index 4d724ef..0000000 --- a/azure/multiapi/storage/v2018_03_28/common/tokencredential.py +++ /dev/null @@ -1,48 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import requests - - -class TokenCredential(object): - """ - Represents a token credential that is used to authorize HTTPS requests. - The token can be updated by the user. - - :ivar str token: - The authorization token. It can be set by the user at any point in a thread-safe way. - """ - - def __init__(self, initial_value=None): - """ - :param initial_value: initial value for the token. - """ - self.token = initial_value - - def signed_session(self, session=None): - """ - Sign requests session with the token. This method is called every time a request is going on the wire. - The user is responsible for updating the token with the preferred tool/SDK. - In general there are two options: - - override this method to update the token in a preferred way and set Authorization header on session - - not override this method, and have a timer that triggers periodically to update the token on this class - - The second option is recommended as it tends to be more performance-friendly. 
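# A small usage sketch of the TokenCredential defined below/above: create it
# with an initial OAuth token and refresh the token attribute from a timer, as
# the docstring recommends. The get_fresh_token function is a hypothetical
# stand-in for whatever identity library actually supplies bearer tokens.
import threading

from azure.multiapi.storage.v2018_03_28.common.tokencredential import TokenCredential

def get_fresh_token():
    return 'eyJ...'   # placeholder bearer token

credential = TokenCredential(get_fresh_token())

def refresh_periodically(interval_seconds=1800):
    credential.token = get_fresh_token()
    threading.Timer(interval_seconds, refresh_periodically, [interval_seconds]).start()

# session = credential.signed_session()   # attaches the Authorization header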
- - :param session: The session to configure for authentication - :type session: requests.Session - :rtype: requests.Session - """ - session = session or requests.Session() - session.headers['Authorization'] = "Bearer {}".format(self.token) - - return session - - def token(self, new_value): - """ - :param new_value: new value to be set as the token. - """ - self.token = new_value \ No newline at end of file diff --git a/azure/multiapi/storage/v2018_03_28/file/__init__.py b/azure/multiapi/storage/v2018_03_28/file/__init__.py deleted file mode 100644 index 464a949..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from .fileservice import FileService -from .models import ( - Share, - ShareProperties, - File, - FileProperties, - Directory, - DirectoryProperties, - FileRange, - ContentSettings, - CopyProperties, - SharePermissions, - FilePermissions, - DeleteSnapshot, -) diff --git a/azure/multiapi/storage/v2018_03_28/file/_constants.py b/azure/multiapi/storage/v2018_03_28/file/_constants.py deleted file mode 100644 index 298141f..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/_constants.py +++ /dev/null @@ -1,11 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -__author__ = 'Microsoft Corp. ' -__version__ = '1.3.1' - -# x-ms-version for storage service. -X_MS_VERSION = '2018-03-28' diff --git a/azure/multiapi/storage/v2018_03_28/file/_deserialization.py b/azure/multiapi/storage/v2018_03_28/file/_deserialization.py deleted file mode 100644 index e1e4ec4..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/_deserialization.py +++ /dev/null @@ -1,241 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from dateutil import parser - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree -from .models import ( - Share, - Directory, - File, - FileProperties, - FileRange, - ShareProperties, - DirectoryProperties, -) -from ..common.models import ( - _list, -) -from ..common._deserialization import ( - _parse_properties, - _parse_metadata, -) -from ..common._error import _validate_content_match -from ..common._common_conversion import ( - _get_content_md5, - _to_str, -) - -def _parse_snapshot_share(response, name): - ''' - Extracts snapshot return header. 
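# A condensed, standalone sketch of the XML-listing parse performed by the
# deserialization helpers in this module: pull the NextMarker and each share's
# name and properties out of an EnumerationResults document. The sample XML is
# abbreviated and purely illustrative.
from xml.etree import ElementTree as ETree

SAMPLE = '''
<EnumerationResults>
  <Shares>
    <Share>
      <Name>share-a</Name>
      <Properties><Quota>5120</Quota></Properties>
    </Share>
  </Shares>
  <NextMarker/>
</EnumerationResults>
'''

root = ETree.fromstring(SAMPLE)
next_marker = root.findtext('NextMarker') or None
shares = [
    {'name': el.findtext('Name'),
     'quota': int(el.find('Properties').findtext('Quota'))}
    for el in root.find('Shares').findall('Share')
]
assert shares == [{'name': 'share-a', 'quota': 5120}]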
- ''' - snapshot = response.headers.get('x-ms-snapshot') - - return _parse_share(response, name, snapshot) - -def _parse_share(response, name, snapshot=None): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, ShareProperties) - return Share(name, props, metadata, snapshot) - - -def _parse_directory(response, name): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, DirectoryProperties) - return Directory(name, props, metadata) - - -def _parse_file(response, name, validate_content=False): - if response is None: - return None - - metadata = _parse_metadata(response) - props = _parse_properties(response, FileProperties) - - # For range gets, only look at 'x-ms-content-md5' for overall MD5 - content_settings = getattr(props, 'content_settings') - if 'content-range' in response.headers: - if 'x-ms-content-md5' in response.headers: - setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-content-md5'])) - else: - delattr(content_settings, 'content_md5') - - if validate_content: - computed_md5 = _get_content_md5(response.body) - _validate_content_match(response.headers['content-md5'], computed_md5) - - return File(name, response.body, props, metadata) - - -def _convert_xml_to_shares(response): - ''' - - - string-value - string-value - int-value - - - share-name - date-time-value - - date/time-value - etag - max-share-size - - - value - - - - marker-value - - ''' - if response is None or response.body is None: - return None - - shares = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(shares, 'next_marker', next_marker) - - shares_element = list_element.find('Shares') - - for share_element in shares_element.findall('Share'): - # Name element - share = Share() - share.name = share_element.findtext('Name') - - # Snapshot - share.snapshot = share_element.findtext('Snapshot') - - # Metadata - metadata_root_element = share_element.find('Metadata') - if metadata_root_element is not None: - share.metadata = dict() - for metadata_element in metadata_root_element: - share.metadata[metadata_element.tag] = metadata_element.text - - # Properties - properties_element = share_element.find('Properties') - share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified')) - share.properties.etag = properties_element.findtext('Etag') - share.properties.quota = int(properties_element.findtext('Quota')) - - # Add share to list - shares.append(share) - - return shares - - -def _convert_xml_to_directories_and_files(response): - ''' - - - string-value - int-value - - - file-name - - size-in-bytes - - - - directory-name - - - - - ''' - if response is None or response.body is None: - return None - - entries = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(entries, 'next_marker', next_marker) - - entries_element = list_element.find('Entries') - - for file_element in entries_element.findall('File'): - # Name element - file = File() - file.name = file_element.findtext('Name') - - # Properties - properties_element = file_element.find('Properties') - file.properties.content_length = int(properties_element.findtext('Content-Length')) - - # Add file to list - entries.append(file) - - for directory_element in entries_element.findall('Directory'): - # Name 
element - directory = Directory() - directory.name = directory_element.findtext('Name') - - # Add directory to list - entries.append(directory) - - return entries - - -def _convert_xml_to_ranges(response): - ''' - - - - Start Byte - End Byte - - - Start Byte - End Byte - - - ''' - if response is None or response.body is None: - return None - - ranges = list() - ranges_element = ETree.fromstring(response.body) - - for range_element in ranges_element.findall('Range'): - # Parse range - range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End'))) - - # Add range to list - ranges.append(range) - - return ranges - - -def _convert_xml_to_share_stats(response): - ''' - - - 15 - - ''' - if response is None or response.body is None: - return None - - share_stats_element = ETree.fromstring(response.body) - return int(share_stats_element.findtext('ShareUsage')) diff --git a/azure/multiapi/storage/v2018_03_28/file/_download_chunking.py b/azure/multiapi/storage/v2018_03_28/file/_download_chunking.py deleted file mode 100644 index 097d68b..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/_download_chunking.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import threading - - -def _download_file_chunks(file_service, share_name, directory_name, file_name, - download_size, block_size, progress, start_range, end_range, - stream, max_connections, progress_callback, validate_content, - timeout, operation_context, snapshot): - - downloader_class = _ParallelFileChunkDownloader if max_connections > 1 else _SequentialFileChunkDownloader - - downloader = downloader_class( - file_service, - share_name, - directory_name, - file_name, - download_size, - block_size, - progress, - start_range, - end_range, - stream, - progress_callback, - validate_content, - timeout, - operation_context, - snapshot, - ) - - if max_connections > 1: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets())) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - - -class _FileChunkDownloader(object): - def __init__(self, file_service, share_name, directory_name, file_name, - download_size, chunk_size, progress, start_range, end_range, - stream, progress_callback, validate_content, timeout, operation_context, snapshot): - # identifiers for the file - self.file_service = file_service - self.share_name = share_name - self.directory_name = directory_name - self.file_name = file_name - - # information on the download range/chunk size - self.chunk_size = chunk_size - self.download_size = download_size - self.start_index = start_range - self.file_end = end_range - - # the destination that we will write to - self.stream = stream - - # progress related - self.progress_callback = progress_callback - self.progress_total = progress - - # parameters for each get file operation - self.validate_content = validate_content - self.timeout = timeout - self.operation_context = operation_context - self.snapshot = snapshot - - def get_chunk_offsets(self): - index = self.start_index - while index < self.file_end: - yield index - index += self.chunk_size - - def process_chunk(self, 
chunk_start): - if chunk_start + self.chunk_size > self.file_end: - chunk_end = self.file_end - else: - chunk_end = chunk_start + self.chunk_size - - chunk_data = self._download_chunk(chunk_start, chunk_end).content - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - # should be provided by the subclass - def _update_progress(self, length): - pass - - # should be provided by the subclass - def _write_to_stream(self, chunk_data, chunk_start): - pass - - def _download_chunk(self, chunk_start, chunk_end): - return self.file_service._get_file( - self.share_name, - self.directory_name, - self.file_name, - start_range=chunk_start, - end_range=chunk_end - 1, - validate_content=self.validate_content, - timeout=self.timeout, - _context=self.operation_context, - snapshot=self.snapshot - ) - - -class _ParallelFileChunkDownloader(_FileChunkDownloader): - def __init__(self, file_service, share_name, directory_name, file_name, - download_size, chunk_size, progress, start_range, end_range, - stream, progress_callback, validate_content, timeout, operation_context, snapshot): - super(_ParallelFileChunkDownloader, self).__init__(file_service, share_name, directory_name, file_name, - download_size, chunk_size, progress, start_range, end_range, - stream, progress_callback, validate_content, timeout, - operation_context, snapshot) - - # for a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() - - # since parallel operations are going on - # it is essential to protect the writing and progress reporting operations - self.stream_lock = threading.Lock() - self.progress_lock = threading.Lock() - - def _update_progress(self, length): - if self.progress_callback is not None: - with self.progress_lock: - self.progress_total += length - total_so_far = self.progress_total - self.progress_callback(total_so_far, self.download_size) - - def _write_to_stream(self, chunk_data, chunk_start): - with self.stream_lock: - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - - -class _SequentialFileChunkDownloader(_FileChunkDownloader): - def __init__(self, file_service, share_name, directory_name, file_name, download_size, chunk_size, progress, - start_range, end_range, stream, progress_callback, validate_content, timeout, operation_context, - snapshot): - super(_SequentialFileChunkDownloader, self).__init__(file_service, share_name, directory_name, file_name, - download_size, chunk_size, progress, start_range, - end_range, stream, progress_callback, validate_content, - timeout, operation_context, snapshot) - - def _update_progress(self, length): - if self.progress_callback is not None: - self.progress_total += length - self.progress_callback(self.progress_total, self.download_size) - - def _write_to_stream(self, chunk_data, chunk_start): - # chunk_start is ignored in the case of sequential download since we cannot seek the destination stream - self.stream.write(chunk_data) diff --git a/azure/multiapi/storage/v2018_03_28/file/_serialization.py b/azure/multiapi/storage/v2018_03_28/file/_serialization.py deleted file mode 100644 index 03aecd1..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/_serialization.py +++ /dev/null @@ -1,66 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft 
Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ..common._common_conversion import _str -from ..common._error import ( - _validate_not_none, - _ERROR_START_END_NEEDED_FOR_MD5, - _ERROR_RANGE_TOO_LARGE_FOR_MD5, -) - - -def _get_path(share_name=None, directory_name=None, file_name=None): - ''' - Creates the path to access a file resource. - - share_name: - Name of share. - directory_name: - The path to the directory. - file_name: - Name of file. - ''' - if share_name and directory_name and file_name: - return '/{0}/{1}/{2}'.format( - _str(share_name), - _str(directory_name), - _str(file_name)) - elif share_name and directory_name: - return '/{0}/{1}'.format( - _str(share_name), - _str(directory_name)) - elif share_name and file_name: - return '/{0}/{1}'.format( - _str(share_name), - _str(file_name)) - elif share_name: - return '/{0}'.format(_str(share_name)) - else: - return '/' - - -def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False): - # If end range is provided, start range must be provided - if start_range_required or end_range is not None: - _validate_not_none('start_range', start_range) - if end_range_required: - _validate_not_none('end_range', end_range) - - # Format based on whether end_range is present - request.headers = request.headers or {} - if end_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-'.format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5) - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5) - - request.headers['x-ms-range-get-content-md5'] = 'true' diff --git a/azure/multiapi/storage/v2018_03_28/file/_upload_chunking.py b/azure/multiapi/storage/v2018_03_28/file/_upload_chunking.py deleted file mode 100644 index c6fb34f..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/_upload_chunking.py +++ /dev/null @@ -1,133 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
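As a quick sanity check of the range formatting above, a hedged sketch using a stand-in request object (assuming _validate_and_format_range_headers is importable from the module shown above)::

    class _StubRequest(object):
        headers = None

    req = _StubRequest()
    _validate_and_format_range_headers(req, start_range=0, end_range=511)
    # req.headers == {'x-ms-range': 'bytes=0-511'}

    req = _StubRequest()
    _validate_and_format_range_headers(req, start_range=1024, end_range=None,
                                       end_range_required=False)
    # req.headers == {'x-ms-range': 'bytes=1024-'}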
-# -------------------------------------------------------------------------- -import threading - - -def _upload_file_chunks(file_service, share_name, directory_name, file_name, - file_size, block_size, stream, max_connections, - progress_callback, validate_content, timeout): - uploader = _FileChunkUploader( - file_service, - share_name, - directory_name, - file_name, - file_size, - block_size, - stream, - max_connections > 1, - progress_callback, - validate_content, - timeout - ) - - if progress_callback is not None: - progress_callback(0, file_size) - - if max_connections > 1: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(max_connections) - range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets())) - else: - if file_size is not None: - range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()] - else: - range_ids = uploader.process_all_unknown_size() - - return range_ids - - -class _FileChunkUploader(object): - def __init__(self, file_service, share_name, directory_name, file_name, - file_size, chunk_size, stream, parallel, progress_callback, - validate_content, timeout): - self.file_service = file_service - self.share_name = share_name - self.directory_name = directory_name - self.file_name = file_name - self.file_size = file_size - self.chunk_size = chunk_size - self.stream = stream - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - self.progress_callback = progress_callback - self.progress_total = 0 - self.progress_lock = threading.Lock() if parallel else None - self.validate_content = validate_content - self.timeout = timeout - - def get_chunk_offsets(self): - index = 0 - if self.file_size is None: - # we don't know the size of the stream, so we have no - # choice but to seek - while True: - data = self._read_from_stream(index, 1) - if not data: - break - yield index - index += self.chunk_size - else: - while index < self.file_size: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_offset): - size = self.chunk_size - if self.file_size is not None: - size = min(size, self.file_size - chunk_offset) - chunk_data = self._read_from_stream(chunk_offset, size) - return self._upload_chunk_with_progress(chunk_offset, chunk_data) - - def process_all_unknown_size(self): - assert self.stream_lock is None - range_ids = [] - index = 0 - while True: - data = self._read_from_stream(None, self.chunk_size) - if data: - index += len(data) - range_id = self._upload_chunk_with_progress(index, data) - range_ids.append(range_id) - else: - break - - return range_ids - - def _read_from_stream(self, offset, count): - if self.stream_lock is not None: - with self.stream_lock: - self.stream.seek(self.stream_start + offset) - data = self.stream.read(count) - else: - data = self.stream.read(count) - return data - - def _update_progress(self, length): - if self.progress_callback is not None: - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - total = self.progress_total - else: - self.progress_total += length - total = self.progress_total - self.progress_callback(total, self.file_size) - - def _upload_chunk_with_progress(self, chunk_start, chunk_data): - chunk_end = chunk_start + len(chunk_data) - 1 - self.file_service.update_range( - self.share_name, - self.directory_name, - self.file_name, - chunk_data, - chunk_start, - chunk_end, - self.validate_content, - timeout=self.timeout - ) - 
range_id = 'bytes={0}-{1}'.format(chunk_start, chunk_end) - self._update_progress(len(chunk_data)) - return range_id diff --git a/azure/multiapi/storage/v2018_03_28/file/fileservice.py b/azure/multiapi/storage/v2018_03_28/file/fileservice.py deleted file mode 100644 index f2d1d6a..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/fileservice.py +++ /dev/null @@ -1,2481 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys -from os import path - -from azure.common import AzureHttpError - -from ..common._auth import ( - _StorageSharedKeyAuthentication, - _StorageSASAuthentication, -) -from ..common._common_conversion import ( - _int_to_str, - _to_str, - _get_content_md5, -) -from ..common._connection import _ServiceParameters -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, - DEV_ACCOUNT_NAME, -) -from ..common._deserialization import ( - _convert_xml_to_service_properties, - _convert_xml_to_signed_identifiers, - _parse_metadata, - _parse_properties, - _parse_length_from_content_range, -) -from ..common._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _validate_type_bytes, - _ERROR_VALUE_NEGATIVE, - _ERROR_STORAGE_MISSING_INFO, - _ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES, - _ERROR_PARALLEL_NOT_SEEKABLE, - _validate_access_policies, -) -from ..common._http import HTTPRequest -from ..common._serialization import ( - _get_request_body, - _get_data_bytes_only, - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, - _add_metadata_headers, -) -from ..common.models import ( - Services, - ListGenerator, - _OperationContext, -) -from .sharedaccesssignature import ( - FileSharedAccessSignature, -) -from ..common.storageclient import StorageClient -from ._deserialization import ( - _convert_xml_to_shares, - _convert_xml_to_directories_and_files, - _convert_xml_to_ranges, - _convert_xml_to_share_stats, - _parse_file, - _parse_share, - _parse_snapshot_share, - _parse_directory, -) -from ._download_chunking import _download_file_chunks -from ._serialization import ( - _get_path, - _validate_and_format_range_headers, -) -from ._upload_chunking import _upload_file_chunks -from .models import ( - FileProperties, -) - -from ._constants import ( - X_MS_VERSION, - __version__ as package_version, -) - -_SHARE_NOT_FOUND_ERROR_CODE = 'ShareNotFound' -_PARENT_NOT_FOUND_ERROR_CODE = 'ParentNotFound' -_RESOURCE_NOT_FOUND_ERROR_CODE = 'ResourceNotFound' -_RESOURCE_ALREADY_EXISTS_ERROR_CODE = 'ResourceAlreadyExists' -_SHARE_ALREADY_EXISTS_ERROR_CODE = 'ShareAlreadyExists' - -if sys.version_info >= (3,): - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO - - -class FileService(StorageClient): - ''' - The Server Message Block (SMB) protocol is the preferred file share protocol - used on premise today. The Microsoft Azure File service enables customers to - leverage the availability and scalability of Azure's Cloud Infrastructure as - a Service (IaaS) SMB without having to rewrite SMB client applications. - - The Azure File service also offers a compelling alternative to traditional - Direct Attached Storage (DAS) and Storage Area Network (SAN) solutions, which - are often complex and expensive to install, configure, and operate. 
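For intuition about the chunked upload above: the offset generator simply steps through the stream in chunk_size increments. A simplified, standalone sketch of the known-size case::

    def chunk_offsets(file_size, chunk_size):
        # Mirrors _FileChunkUploader.get_chunk_offsets when the stream size is known.
        index = 0
        while index < file_size:
            yield index
            index += chunk_size

    print(list(chunk_offsets(10 * 1024 * 1024, 4 * 1024 * 1024)))
    # [0, 4194304, 8388608] -> ranges 0-4 MiB, 4-8 MiB and 8-10 MiB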
- - :ivar int MAX_SINGLE_GET_SIZE: - The size of the first range get performed by get_file_to_* methods if - max_connections is greater than 1. Less data will be returned if the - file is smaller than this. - :ivar int MAX_CHUNK_GET_SIZE: - The size of subsequent range gets performed by get_file_to_* methods if - max_connections is greater than 1 and the file is larger than MAX_SINGLE_GET_SIZE. - Less data will be returned if the remainder of the file is smaller than - this. If this is set to larger than 4MB, content_validation will throw an - error if enabled. However, if content_validation is not desired a size - greater than 4MB may be optimal. Setting this below 4MB is not recommended. - :ivar int MAX_RANGE_SIZE: - The size of the ranges put by create_file_from_* methods. Smaller ranges - may be put if there is less data provided. The maximum range size the service - supports is 4MB. - ''' - MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024 - MAX_CHUNK_GET_SIZE = 8 * 1024 * 1024 - MAX_RANGE_SIZE = 4 * 1024 * 1024 - - def __init__(self, account_name=None, account_key=None, sas_token=None, - protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, - request_session=None, connection_string=None, socket_timeout=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - ''' - service_params = _ServiceParameters.get_service_parameters( - 'file', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(FileService, self).__init__(service_params) - - if self.account_name == DEV_ACCOUNT_NAME: - raise ValueError(_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - self._X_MS_VERSION = X_MS_VERSION - self._update_user_agent_string(package_version) - - def make_file_url(self, share_name, directory_name, file_name, - protocol=None, sas_token=None): - ''' - Creates the url to access a file. 
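To ground the constructor options described above, two hedged examples of creating the client (the account name, key and connection string are placeholders)::

    # Shared key authentication.
    service = FileService(account_name='myaccount', account_key='<account-key>')

    # Or let a connection string supply everything except the request session.
    service = FileService(
        connection_string='DefaultEndpointsProtocol=https;'
                          'AccountName=myaccount;AccountKey=<account-key>;'
                          'EndpointSuffix=core.windows.net')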
- - :param str share_name: - Name of share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file. - :param str protocol: - Protocol to use: 'http' or 'https'. If not specified, uses the - protocol specified when FileService was initialized. - :param str sas_token: - Shared access signature token created with - generate_shared_access_signature. - :return: file access URL. - :rtype: str - ''' - - if directory_name is None: - url = '{}://{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - share_name, - file_name, - ) - else: - url = '{}://{}/{}/{}/{}'.format( - protocol or self.protocol, - self.primary_endpoint, - share_name, - directory_name, - file_name, - ) - - if sas_token: - url += '?' + sas_token - - return url - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the file service. - Use the returned signature with the sas_token parameter of the FileService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = FileSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.FILE, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_share_shared_access_signature(self, share_name, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, - protocol=None, - cache_control=None, - content_disposition=None, - content_encoding=None, - content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param SharePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_share_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. 
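A hedged usage sketch combining the share-level SAS helper with make_file_url; values are placeholders, and SharePermissions is the class re-exported from the models module shown earlier::

    from datetime import datetime, timedelta

    service = FileService(account_name='myaccount', account_key='<account-key>')

    # Share-level SAS granting read access for one hour.
    sas = service.generate_share_shared_access_signature(
        'myshare',
        permission=SharePermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1))

    url = service.make_file_url('myshare', 'mydir', 'myfile.txt', sas_token=sas)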
- :rtype: str - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = FileSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_share( - share_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def generate_file_shared_access_signature(self, share_name, - directory_name=None, - file_name=None, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, - protocol=None, - cache_control=None, - content_disposition=None, - content_encoding=None, - content_language=None, - content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param FilePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. Possible values are - both HTTPS and HTTP (https,http) or HTTPS only (https). The default value - is https,http. Note that HTTP only is not a permitted value. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. 
- :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = FileSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_file( - share_name, - directory_name, - file_name, - permission, - expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - cache_control=cache_control, - content_disposition=content_disposition, - content_encoding=content_encoding, - content_language=content_language, - content_type=content_type, - ) - - def set_file_service_properties(self, hour_metrics=None, minute_metrics=None, - cors=None, timeout=None): - ''' - Sets the properties of a storage account's File service, including - Azure Storage Analytics. If an element (ex HourMetrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param Metrics hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :param Metrics minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~..common.models.CorsRule`) - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(None, hour_metrics, minute_metrics, cors)) - - self._perform_request(request) - - def get_file_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's File service, including - Azure Storage Analytics. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The file service properties. - :rtype: - :class:`~..common.models.ServiceProperties` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def list_shares(self, prefix=None, marker=None, num_results=None, - include_metadata=False, timeout=None, include_snapshots=False): - ''' - Returns a generator to list the shares under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned or num_results - is reached. 
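A rough illustration of driving that generator, reusing a hypothetical service client like the one constructed in the earlier sketch::

    # The generator lazily follows continuation tokens on its own.
    for share in service.list_shares(include_metadata=True):
        print(share.name, share.properties.quota, share.metadata)

    # Or cap the enumeration and resume later from the recorded marker.
    page = service.list_shares(num_results=100)
    shares = list(page)          # exhausting the page populates next_marker
    more = service.list_shares(num_results=100, marker=page.next_marker)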
- - If num_results is specified and the account has more than that number of - shares, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only shares whose names - begin with the specified prefix. - :param int num_results: - Specifies the maximum number of shares to return. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - :param bool include_snapshots: - Specifies that share snapshots be returned in the response. - ''' - include = 'snapshots' if include_snapshots else None - if include_metadata: - if include is not None: - include = include + ',metadata' - else: - include = 'metadata' - operation_context = _OperationContext(location_lock=True) - kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, - 'include': include, 'timeout': timeout, '_context': operation_context} - resp = self._list_shares(**kwargs) - - return ListGenerator(resp, self._list_shares, (), kwargs) - - def _list_shares(self, prefix=None, marker=None, max_results=None, - include=None, timeout=None, _context=None): - ''' - Returns a list of the shares under the specified account. - - :param str prefix: - Filters the results to return only shares whose names - begin with the specified prefix. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of shares to return. A single list - request may return up to 1000 shares and potentially a continuation - token which should be followed to get additional resutls. - :param string include: - Include this parameter to specify that either the share's - metadata, snapshots or both be returned as part of the response body. set this - parameter to string 'metadata' to get share's metadata. set this parameter to 'snapshots' - to get all the share snapshots. for both use 'snapshots,metadata'. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_shares, operation_context=_context) - - def create_share(self, share_name, metadata=None, quota=None, - fail_on_exist=False, timeout=None): - ''' - Creates a new share under the specified account. If the share - with the same name already exists, the operation fails on the - service. By default, the exception is swallowed by the client. 
- To expose the exception, specify True for fail_on_exists. - - :param str share_name: - Name of share to create. - :param metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :type metadata: dict(str, str) - :param int quota: - Specifies the maximum size of the share, in gigabytes. Must be - greater than 0, and less than or equal to 5TB (5120). - :param bool fail_on_exist: - Specify whether to throw an exception when the share exists. - False by default. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if share is created, False if share already exists. - :rtype: bool - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-share-quota': _int_to_str(quota) - } - _add_metadata_headers(metadata, request) - - if not fail_on_exist: - try: - self._perform_request(request, expected_errors=[_SHARE_ALREADY_EXISTS_ERROR_CODE]) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def snapshot_share(self, share_name, metadata=None, quota=None, timeout=None): - ''' - Creates a snapshot of an existing share under the specified account. - - :param str share_name: - The name of the share to create a snapshot of. - :param metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :type metadata: a dict of str to str: - :param int quota: - Specifies the maximum size of the share, in gigabytes. Must be - greater than 0, and less than or equal to 5TB (5120). - :param int timeout: - The timeout parameter is expressed in seconds. - :return: snapshot properties - :rtype: azure.storage.file.models.Share - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'snapshot', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-share-quota': _int_to_str(quota) - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_snapshot_share, [share_name]) - - def get_share_properties(self, share_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A Share that exposes properties and metadata. 
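A short, hedged sketch of the share lifecycle calls above, again with a placeholder service client::

    # service = FileService(...) as sketched earlier
    created = service.create_share('myshare', quota=10)   # False if the share already existed
    snap = service.snapshot_share('myshare', metadata={'label': 'nightly'})
    props = service.get_share_properties('myshare', snapshot=snap.snapshot)
    print(props.properties.quota, props.metadata)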
- :rtype: :class:`~azure.storage.file.models.Share` - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - - return self._perform_request(request, _parse_share, [share_name]) - - def set_share_properties(self, share_name, quota, timeout=None): - ''' - Sets service-defined properties for the specified share. - - :param str share_name: - Name of existing share. - :param int quota: - Specifies the maximum size of the share, in gigabytes. Must be - greater than 0, and less than or equal to 5 TB (5120 GB). - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('quota', quota) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-share-quota': _int_to_str(quota) - } - - self._perform_request(request) - - def get_share_metadata(self, share_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata for the specified share. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: - A dictionary representing the share metadata name, value pairs. - :rtype: dict(str, str) - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot), - } - - return self._perform_request(request, _parse_metadata) - - def set_share_metadata(self, share_name, metadata=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - share. Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param str share_name: - Name of existing share. - :param metadata: - A dict containing name-value pairs to associate with the share as - metadata. Example: {'category':'test'} - :type metadata: dict(str, str) - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def get_share_acl(self, share_name, timeout=None): - ''' - Gets the permissions for the specified share. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: A dictionary of access policies associated with the share. 
- :rtype: dict(str, :class:`~..common.models.AccessPolicy`) - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_signed_identifiers) - - def set_share_acl(self, share_name, signed_identifiers=None, timeout=None): - ''' - Sets the permissions for the specified share or stored access - policies that may be used with Shared Access Signatures. - - :param str share_name: - Name of existing share. - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~..common.models.AccessPolicy`) - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - - self._perform_request(request) - - def get_share_stats(self, share_name, timeout=None): - ''' - Gets the approximate size of the data stored on the share, - rounded up to the nearest gigabyte. - - Note that this value may not include all recently created - or recently resized files. - - :param str share_name: - Name of existing share. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: the approximate size of the data stored on the share. - :rtype: int - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.query = { - 'restype': 'share', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_share_stats) - - def delete_share(self, share_name, fail_not_exist=False, timeout=None, snapshot=None, delete_snapshots=None): - ''' - Marks the specified share for deletion. If the share - does not exist, the operation fails on the service. By - default, the exception is swallowed by the client. - To expose the exception, specify True for fail_not_exist. - - :param str share_name: - Name of share to delete. - :param bool fail_not_exist: - Specify whether to throw an exception when the share doesn't - exist. False by default. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - Specify this argument to delete a specific snapshot only. - delete_snapshots must be None if this is specified. - :param ~azure.storage.file.models.DeleteSnapshot delete_snapshots: - To delete a share that has snapshots, this must be specified as DeleteSnapshot.Include. - :return: True if share is deleted, False share doesn't exist. 
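For the access-policy pair above, a hedged sketch; AccessPolicy is the class re-exported from the common models module, and the policy name is arbitrary::

    from datetime import datetime, timedelta

    policy = AccessPolicy(permission=SharePermissions.READ,
                          expiry=datetime.utcnow() + timedelta(days=7))
    service.set_share_acl('myshare', signed_identifiers={'read-only': policy})

    acl = service.get_share_acl('myshare')     # {'read-only': AccessPolicy(...)}
    print(service.get_share_stats('myshare'))  # approximate share usage in GB, rounded up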
- :rtype: bool - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name) - request.headers = { - 'x-ms-delete-snapshots': _to_str(delete_snapshots) - } - request.query = { - 'restype': 'share', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot), - } - - if not fail_not_exist: - try: - self._perform_request(request, expected_errors=[_SHARE_NOT_FOUND_ERROR_CODE]) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def create_directory(self, share_name, directory_name, metadata=None, - fail_on_exist=False, timeout=None): - ''' - Creates a new directory under the specified share or parent directory. - If the directory with the same name already exists, the operation fails - on the service. By default, the exception is swallowed by the client. - To expose the exception, specify True for fail_on_exists. - - :param str share_name: - Name of existing share. - :param str directory_name: - Name of directory to create, including the path to the parent - directory. - :param metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :type metadata: dict(str, str): - :param bool fail_on_exist: - specify whether to throw an exception when the directory exists. - False by default. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if directory is created, False if directory already exists. - :rtype: bool - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - if not fail_on_exist: - try: - self._perform_request(request, expected_errors=_RESOURCE_ALREADY_EXISTS_ERROR_CODE) - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - self._perform_request(request) - return True - - def delete_directory(self, share_name, directory_name, - fail_not_exist=False, timeout=None): - ''' - Deletes the specified empty directory. Note that the directory must - be empty before it can be deleted. Attempting to delete directories - that are not empty will fail. - - If the directory does not exist, the operation fails on the - service. By default, the exception is swallowed by the client. - To expose the exception, specify True for fail_not_exist. - - :param str share_name: - Name of existing share. - :param str directory_name: - Name of directory to delete, including the path to the parent - directory. - :param bool fail_not_exist: - Specify whether to throw an exception when the directory doesn't - exist. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: True if directory is deleted, False otherwise. 
- :rtype: bool - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'timeout': _int_to_str(timeout), - } - - if not fail_not_exist: - try: - self._perform_request(request, expected_errors=[_RESOURCE_NOT_FOUND_ERROR_CODE]) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_directory_properties(self, share_name, directory_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to an existing directory. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: properties for the specified directory within a directory object. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :rtype: :class:`~azure.storage.file.models.Directory` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - - return self._perform_request(request, _parse_directory, [directory_name]) - - def get_directory_metadata(self, share_name, directory_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata for the specified directory. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: - A dictionary representing the directory metadata name, value pairs. - :rtype: dict(str, str) - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - - return self._perform_request(request, _parse_metadata) - - def set_directory_metadata(self, share_name, directory_name, metadata=None, timeout=None): - ''' - Sets one or more user-defined name-value pairs for the specified - directory. Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with no metadata dict. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param metadata: - A dict containing name-value pairs to associate with the directory - as metadata. Example: {'category':'test'} - :type metadata: dict(str, str). - :param int timeout: - The timeout parameter is expressed in seconds. 
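A hedged sketch of the directory metadata round trip described above, using the same placeholder service client::

    service.create_directory('myshare', 'logs')
    service.set_directory_metadata('myshare', 'logs', {'category': 'test'})
    print(service.get_directory_metadata('myshare', 'logs'))   # {'category': 'test'}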
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('directory_name', directory_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def list_directories_and_files(self, share_name, directory_name=None, - num_results=None, marker=None, timeout=None, - prefix=None, snapshot=None): - - ''' - Returns a generator to list the directories and files under the specified share. - The generator will lazily follow the continuation tokens returned by - the service and stop when all directories and files have been returned or - num_results is reached. - - If num_results is specified and the share has more than that number of - files and directories, the generator will have a populated next_marker - field once it finishes. This marker can be used to create a new generator - if more results are desired. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param int num_results: - Specifies the maximum number of files to return, - including all directory elements. If the request does not specify - num_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. Setting num_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str prefix: - List only the files and/or directories with the given prefix. - :param str snapshot: - A string that represents the snapshot version, if applicable. - ''' - operation_context = _OperationContext(location_lock=True) - args = (share_name, directory_name) - kwargs = {'marker': marker, 'max_results': num_results, 'timeout': timeout, - '_context': operation_context, 'prefix': prefix, 'snapshot': snapshot} - - resp = self._list_directories_and_files(*args, **kwargs) - - return ListGenerator(resp, self._list_directories_and_files, args, kwargs) - - def _list_directories_and_files(self, share_name, directory_name=None, - marker=None, max_results=None, timeout=None, - prefix=None, _context=None, snapshot=None): - ''' - Returns a list of the directories and files under the specified share. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str marker: - A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns - a next_marker value within the response body if the list returned was - not complete. The marker value may then be used in a subsequent - call to request the next set of list items. The marker value is - opaque to the client. - :param int max_results: - Specifies the maximum number of files to return, - including all directory elements. If the request does not specify - max_results or specifies a value greater than 5,000, the server will - return up to 5,000 items. 
Setting max_results to a value less than - or equal to zero results in error response code 400 (Bad Request). - :param int timeout: - The timeout parameter is expressed in seconds. - :param str prefix: - List only the files and/or directories with the given prefix. - :param str snapshot: - A string that represents the snapshot version, if applicable. - ''' - _validate_not_none('share_name', share_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name) - request.query = { - 'restype': 'directory', - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - - return self._perform_request(request, _convert_xml_to_directories_and_files, - operation_context=_context) - - def get_file_properties(self, share_name, directory_name, file_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. Returns an instance of :class:`~azure.storage.file.models.File` with - :class:`~azure.storage.file.models.FileProperties` and a metadata dict. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: a file object including properties and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'HEAD' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { 'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)} - - return self._perform_request(request, _parse_file, [file_name]) - - def exists(self, share_name, directory_name=None, file_name=None, timeout=None, snapshot=None): - ''' - Returns a boolean indicating whether the share exists if only share name is - given. If directory_name is specificed a boolean will be returned indicating - if the directory exists. If file_name is specified as well, a boolean will be - returned indicating if the file exists. - - :param str share_name: - Name of a share. - :param str directory_name: - The path to a directory. - :param str file_name: - Name of a file. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A boolean indicating whether the resource exists. 
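A sketch of the lazy listing described above: the generator follows continuation tokens on its own, while prefix and num_results bound what is returned. It reuses the placeholder svc instance from the earlier sketch::

    # Items are Directory or File model objects; both expose .name.
    for item in svc.list_directories_and_files('myshare', 'logs',
                                               prefix='2024-', num_results=100):
        print(item.name)

    # If num_results was reached, next_marker can seed a follow-up listing.
    page = svc.list_directories_and_files('myshare', 'logs', num_results=100)
    items = list(page)
    if page.next_marker:
        more = list(svc.list_directories_and_files('myshare', 'logs',
                                                   num_results=100,
                                                   marker=page.next_marker))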
- :rtype: bool - ''' - _validate_not_none('share_name', share_name) - try: - request = HTTPRequest() - request.method = 'HEAD' if file_name is not None else 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - - if file_name is not None: - restype = None - expected_errors = [_RESOURCE_NOT_FOUND_ERROR_CODE, _PARENT_NOT_FOUND_ERROR_CODE] - elif directory_name is not None: - restype = 'directory' - expected_errors = [_RESOURCE_NOT_FOUND_ERROR_CODE, _SHARE_NOT_FOUND_ERROR_CODE] - else: - restype = 'share' - expected_errors = [_SHARE_NOT_FOUND_ERROR_CODE] - - request.query = { - 'restype': restype, - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot) - } - self._perform_request(request, expected_errors=expected_errors) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def resize_file(self, share_name, directory_name, - file_name, content_length, timeout=None): - ''' - Resizes a file to the specified size. If the specified byte - value is less than the current size of the file, then all - ranges above the specified byte value are cleared. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int content_length: - The length to resize the file to. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-content-length': _to_str(content_length) - } - - self._perform_request(request) - - def set_file_properties(self, share_name, directory_name, file_name, - content_settings, timeout=None): - ''' - Sets system properties on the file. If one property is set for the - content_settings, all properties will be overriden. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set the file properties. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('content_settings', content_settings) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.headers = content_settings._to_headers() - - self._perform_request(request) - - def get_file_metadata(self, share_name, directory_name, file_name, timeout=None, snapshot=None): - ''' - Returns all user-defined metadata for the specified file. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int timeout: - The timeout parameter is expressed in seconds. 
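A short sketch of the existence checks and property setters above: the same exists() call covers share, directory, or file depending on which arguments are passed. The ContentSettings import path is an assumption; names are placeholders and svc is as in the earlier sketches::

    from azure.multiapi.storage.v2018_03_28.file.models import ContentSettings  # assumed path

    svc.exists('myshare')                                # share level
    svc.exists('myshare', directory_name='logs')         # directory level
    svc.exists('myshare', 'logs', file_name='app.log')   # file level

    # Ranges above the new length are cleared when shrinking.
    svc.resize_file('myshare', 'logs', 'app.log', content_length=1024)

    # Setting any one content setting overrides all of them.
    svc.set_file_properties('myshare', 'logs', 'app.log',
                            ContentSettings(content_type='text/plain'))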
- :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: - A dictionary representing the file metadata name, value pairs. - :rtype: dict(str, str) - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot), - } - - return self._perform_request(request, _parse_metadata) - - def set_file_metadata(self, share_name, directory_name, - file_name, metadata=None, timeout=None): - ''' - Sets user-defined metadata for the specified file as one or more - name-value pairs. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the file. To remove all - metadata from the file, call this operation with no metadata headers. - :type metadata: dict(str, str) - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def copy_file(self, share_name, directory_name, file_name, copy_source, - metadata=None, timeout=None): - ''' - Copies a file asynchronously. This operation returns a copy operation - properties object, including a copy ID you can use to check or abort the - copy operation. The File service copies files on a best-effort basis. - - If the destination file exists, it will be overwritten. The destination - file cannot be modified while the copy operation is in progress. - - :param str share_name: - Name of the destination share. The share must exist. - :param str directory_name: - Name of the destination directory. The directory must exist. - :param str file_name: - Name of the destination file. If the destination file exists, it will - be overwritten. Otherwise, it will be created. - :param str copy_source: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param metadata: - Name-value pairs associated with the file as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination file. If one or more name-value - pairs are specified, the destination file is created with the specified - metadata, and the metadata is not copied from the source blob or file. - :type metadata: dict(str, str). - :param int timeout: - The timeout parameter is expressed in seconds. 
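The file metadata calls above are full replacements rather than merges; a sketch with placeholder names::

    svc.set_file_metadata('myshare', 'logs', 'app.log',
                          metadata={'category': 'test', 'owner': 'ops'})

    meta = svc.get_file_metadata('myshare', 'logs', 'app.log')

    # Calling with no metadata dict removes everything previously set.
    svc.set_file_metadata('myshare', 'logs', 'app.log')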
- :return: Copy operation properties such as status, source, and ID. - :rtype: :class:`~azure.storage.file.models.CopyProperties` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('copy_source', copy_source) - - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-copy-source': _to_str(copy_source), - } - _add_metadata_headers(metadata, request) - - return self._perform_request(request, _parse_properties, [FileProperties]).copy - - def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None): - ''' - Aborts a pending copy_file operation, and leaves a destination file - with zero length and full metadata. - - :param str share_name: - Name of destination share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of destination file. - :param str copy_id: - Copy identifier provided in the copy.id of the original - copy_file operation. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('copy_id', copy_id) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'copy', - 'copyid': _to_str(copy_id), - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-copy-action': 'abort', - } - - self._perform_request(request) - - def delete_file(self, share_name, directory_name, file_name, timeout=None): - ''' - Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = {'timeout': _int_to_str(timeout)} - - self._perform_request(request) - - def create_file(self, share_name, directory_name, file_name, - content_length, content_settings=None, metadata=None, - timeout=None): - ''' - Creates a new file. - - See create_file_from_* for high level functions that handle the - creation and upload of large files with automatic chunking and - progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param int content_length: - Length of the file in bytes. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param int timeout: - The timeout parameter is expressed in seconds. 
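A sketch of the asynchronous copy flow above: copy_file returns copy properties whose id can later be used to abort a still-pending copy. The source URL and names are placeholders::

    source_url = ('https://otheraccount.file.core.windows.net/'
                  'myshare/mydir/myfile?sastoken')
    copy = svc.copy_file('myshare', 'logs', 'app-copy.log', source_url)

    if copy.status == 'pending':
        # Leaves a zero-length destination file with full metadata.
        svc.abort_copy_file('myshare', 'logs', 'app-copy.log', copy.id)
    elif copy.status == 'success':
        svc.delete_file('myshare', 'logs', 'app-copy.log')  # clean up the example copy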
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('content_length', content_length) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = {'timeout': _int_to_str(timeout)} - request.headers = { - 'x-ms-content-length': _to_str(content_length), - 'x-ms-type': 'file' - } - _add_metadata_headers(metadata, request) - if content_settings is not None: - request.headers.update(content_settings._to_headers()) - - self._perform_request(request) - - def create_file_from_path(self, share_name, directory_name, file_name, - local_file_path, content_settings=None, - metadata=None, validate_content=False, progress_callback=None, - max_connections=2, timeout=None): - ''' - Creates a new azure file from a local file path, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str local_file_path: - Path of the local file to upload as the file content. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used for setting file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('local_file_path', local_file_path) - - count = path.getsize(local_file_path) - with open(local_file_path, 'rb') as stream: - self.create_file_from_stream( - share_name, directory_name, file_name, stream, - count, content_settings, metadata, validate_content, progress_callback, - max_connections, timeout) - - def create_file_from_text(self, share_name, directory_name, file_name, - text, encoding='utf-8', content_settings=None, - metadata=None, validate_content=False, timeout=None): - ''' - Creates a new file from str/unicode, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str text: - Text to upload to the file. - :param str encoding: - Python encoding to use to convert the text to bytes. 
- :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('text', text) - - if not isinstance(text, bytes): - _validate_not_none('encoding', encoding) - text = text.encode(encoding) - - self.create_file_from_bytes( - share_name, directory_name, file_name, text, count=len(text), - content_settings=content_settings, metadata=metadata, - validate_content=validate_content, timeout=timeout) - - def create_file_from_bytes( - self, share_name, directory_name, file_name, file, - index=0, count=None, content_settings=None, metadata=None, - validate_content=False, progress_callback=None, max_connections=2, - timeout=None): - ''' - Creates a new file from an array of bytes, or updates the content - of an existing file, with automatic chunking and progress - notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param str file: - Content of file as an array of bytes. - :param int index: - Start index in the array of bytes. - :param int count: - Number of bytes to upload. Set to None or negative value to upload - all bytes starting from index. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
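A sketch of the high-level upload helpers above, which chunk the source and can report progress for large files. The local path and names are placeholders, and ContentSettings is imported as in the earlier sketch::

    def on_progress(current, total):
        print('{0}/{1} bytes uploaded'.format(current, total))

    svc.create_file_from_path('myshare', 'logs', 'app.log', '/tmp/app.log',
                              content_settings=ContentSettings(content_type='text/plain'),
                              max_connections=4, progress_callback=on_progress)

    svc.create_file_from_text('myshare', 'logs', 'note.txt', u'hello fileshare',
                              encoding='utf-8')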
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('file', file) - _validate_type_bytes('file', file) - - if index < 0: - raise TypeError(_ERROR_VALUE_NEGATIVE.format('index')) - - if count is None or count < 0: - count = len(file) - index - - stream = BytesIO(file) - stream.seek(index) - - self.create_file_from_stream( - share_name, directory_name, file_name, stream, count, - content_settings, metadata, validate_content, progress_callback, - max_connections, timeout) - - def create_file_from_stream( - self, share_name, directory_name, file_name, stream, count, - content_settings=None, metadata=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None): - ''' - Creates a new file from a file/stream, or updates the content of an - existing file, with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of file to create or update. - :param io.IOBase stream: - Opened file/stream to upload as the file content. - :param int count: - Number of bytes to read from the stream. This is required, a - file cannot be created if the count is unknown. - :param ~azure.storage.file.models.ContentSettings content_settings: - ContentSettings object used to set file properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :param bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :param progress_callback: - Callback for progress with signature function(current, total) where - current is the number of bytes transfered so far and total is the - size of the file, or None if the total size is unknown. - :type progress_callback: func(current, total) - :param int max_connections: - Maximum number of parallel connections to use. Note that parallel upload - requires the stream to be seekable. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('stream', stream) - _validate_not_none('count', count) - - if count < 0: - raise TypeError(_ERROR_VALUE_NEGATIVE.format('count')) - - self.create_file( - share_name, - directory_name, - file_name, - count, - content_settings, - metadata, - timeout - ) - - _upload_file_chunks( - self, - share_name, - directory_name, - file_name, - count, - self.MAX_RANGE_SIZE, - stream, - max_connections, - progress_callback, - validate_content, - timeout - ) - - def _get_file(self, share_name, directory_name, file_name, - start_range=None, end_range=None, validate_content=False, - timeout=None, _context=None, snapshot=None): - ''' - Downloads a file's content, metadata, and properties. You can specify a - range if you don't need to download the file in its entirety. If no range - is specified, the full file will be downloaded. 
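create_file_from_bytes and create_file_from_stream drive the same chunked upload; a sketch showing the index/count slicing and the required count for streams (placeholder names)::

    import io

    data = b'0123456789' * 1000

    # Upload bytes 100..199 of the buffer only.
    svc.create_file_from_bytes('myshare', 'logs', 'slice.bin', data,
                               index=100, count=100)

    # For streams the total size must be known up front.
    stream = io.BytesIO(data)
    svc.create_file_from_stream('myshare', 'logs', 'stream.bin', stream,
                                count=len(data))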
- - See get_file_to_* for high level functions that handle the download - of large files with automatic chunking and progress notifications. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - When this is set to True and specified together with the Range header, - the service returns the MD5 hash for the range, as long as the range - is less than or equal to 4 MB in size. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with content, properties, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { 'timeout': _int_to_str(timeout), 'sharesnapshot': _to_str(snapshot)} - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False, - check_content_md5=validate_content) - - return self._perform_request(request, _parse_file, - [file_name, validate_content], - operation_context=_context) - - def get_file_to_path(self, share_name, directory_name, file_name, file_path, - open_mode='wb', start_range=None, end_range=None, - validate_content=False, progress_callback=None, - max_connections=2, timeout=None, snapshot=None): - ''' - Downloads a file to a file path, with automatic chunking and progress - notifications. Returns an instance of File with properties and metadata. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param str file_path: - Path of file to write to. - :param str open_mode: - Mode to use when opening the file. Note that specifying append only - open_mode prevents parallel download. So, max_connections must be set - to 1 if this open_mode is used. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. 
This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with properties and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('file_path', file_path) - _validate_not_none('open_mode', open_mode) - - if max_connections > 1 and 'a' in open_mode: - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - with open(file_path, open_mode) as stream: - file = self.get_file_to_stream( - share_name, directory_name, file_name, stream, - start_range, end_range, validate_content, - progress_callback, max_connections, timeout, snapshot) - - return file - - def get_file_to_stream( - self, share_name, directory_name, file_name, stream, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None, snapshot=None): - ''' - Downloads a file to a stream, with automatic chunking and progress - notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties - and metadata. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param io.IOBase stream: - Opened file/stream to write to. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. 
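A sketch of the chunked download to a local path described above; an append-only open_mode prevents parallel download, so max_connections greater than 1 is used with the default 'wb' mode. Paths and names are placeholders::

    afile = svc.get_file_to_path('myshare', 'logs', 'app.log', '/tmp/app.log',
                                 max_connections=4)
    print(afile.properties.content_length)
    print(afile.properties.content_settings.content_type)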
- :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with properties and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('stream', stream) - - if end_range is not None: - _validate_not_none("start_range", start_range) - - # the stream must be seekable if parallel download is required - if max_connections > 1: - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - else: - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. 
- first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE - - initial_request_start = start_range if start_range is not None else 0 - - if end_range is not None and end_range - start_range < first_get_size: - initial_request_end = end_range - else: - initial_request_end = initial_request_start + first_get_size - 1 - - # Send a context object to make sure we always retry to the initial location - operation_context = _OperationContext(location_lock=True) - try: - file = self._get_file(share_name, - directory_name, - file_name, - start_range=initial_request_start, - end_range=initial_request_end, - validate_content=validate_content, - timeout=timeout, - _context=operation_context, - snapshot=snapshot) - - # Parse the total file size and adjust the download size if ranges - # were specified - file_size = _parse_length_from_content_range(file.properties.content_range) - if end_range is not None: - # Use the end_range unless it is over the end of the file - download_size = min(file_size, end_range - start_range + 1) - elif start_range is not None: - download_size = file_size - start_range - else: - download_size = file_size - except AzureHttpError as ex: - if start_range is None and ex.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - file = self._get_file(share_name, - directory_name, - file_name, - validate_content=validate_content, - timeout=timeout, - _context=operation_context, - snapshot=snapshot) - - # Set the download size to empty - download_size = 0 - else: - raise ex - - # Mark the first progress chunk. If the file is small, this is the only call - if progress_callback: - progress_callback(file.properties.content_length, download_size) - - # Write the content to the user stream - # Clear file content since output has been written to user stream - if file.content is not None: - stream.write(file.content) - file.content = None - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the blob in chunks. - if file.properties.content_length != download_size: - # At this point we would like to lock on something like the etag so that - # if the file is modified, we do not get a corrupted download. However, - # this feature is not yet available on the file service. 
- - end_file = file_size - if end_range is not None: - # Use the end_range unless it is over the end of the file - end_file = min(file_size, end_range + 1) - - _download_file_chunks( - self, - share_name, - directory_name, - file_name, - download_size, - self.MAX_CHUNK_GET_SIZE, - first_get_size, - initial_request_end + 1, # start where the first download ended - end_file, - stream, - max_connections, - progress_callback, - validate_content, - timeout, - operation_context, - snapshot - ) - - # Set the content length to the download size instead of the size of - # the last range - file.properties.content_length = download_size - - # Overwrite the content range to the user requested range - file.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, file_size) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - file.properties.content_md5 = None - - return file - - def get_file_to_bytes(self, share_name, directory_name, file_name, - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None, snapshot=None): - ''' - Downloads a file as an array of bytes, with automatic chunking and - progress notifications. Returns an instance of :class:`~azure.storage.file.models.File` with - properties, metadata, and content. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. 
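A sketch of a ranged download into an arbitrary stream using the logic above; the range offsets are inclusive, and parallel download requires the stream to be seekable::

    import io

    buf = io.BytesIO()
    afile = svc.get_file_to_stream('myshare', 'logs', 'app.log', buf,
                                   start_range=0, end_range=511,  # first 512 bytes, inclusive
                                   validate_content=True, max_connections=1)
    print(len(buf.getvalue()), afile.properties.content_range)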
This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with properties, content, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - - stream = BytesIO() - file = self.get_file_to_stream( - share_name, - directory_name, - file_name, - stream, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - timeout, - snapshot) - - file.content = stream.getvalue() - return file - - def get_file_to_text( - self, share_name, directory_name, file_name, encoding='utf-8', - start_range=None, end_range=None, validate_content=False, - progress_callback=None, max_connections=2, timeout=None, snapshot=None): - ''' - Downloads a file as unicode text, with automatic chunking and progress - notifications. Returns an instance of :class:`~azure.storage.file.models.File` with properties, - metadata, and content. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param str encoding: - Python encoding to use when decoding the file data. - :param int start_range: - Start of byte range to use for downloading a section of the file. - If no end_range is given, all bytes after the start_range will be downloaded. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for downloading a section of the file. - If end_range is given, start_range must be provided. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If set to true, validates an MD5 hash for each retrieved portion of - the file. This is primarily valuable for detecting bitflips on the wire - if using http instead of https as https (the default) will already - validate. Note that the service will only return transactional MD5s - for chunks 4MB or less so the first get request will be of size - self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If - self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be - thrown. As computing the MD5 takes processing time and more requests - will need to be done due to the reduced chunk size there may be some - increase in latency. - :param progress_callback: - Callback for progress with signature function(current, total) - where current is the number of bytes transfered so far, and total is - the size of the file if known. - :type progress_callback: func(current, total) - :param int max_connections: - If set to 2 or greater, an initial get will be done for the first - self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, - the method returns at this point. 
If it is not, it will download the - remaining data parallel using the number of threads equal to - max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. - If set to 1, a single large get request will be done. This is not - generally recommended but available if very few threads should be - used, network requests are very expensive, or a non-seekable stream - prevents parallel download. This may also be valuable if the file is - being concurrently modified to enforce atomicity or if many files are - expected to be empty as an extra request is required for empty files - if max_connections is greater than 1. - :param int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :param str snapshot: - A string that represents the snapshot version, if applicable. - :return: A File with properties, content, and metadata. - :rtype: :class:`~azure.storage.file.models.File` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('encoding', encoding) - - file = self.get_file_to_bytes( - share_name, - directory_name, - file_name, - start_range, - end_range, - validate_content, - progress_callback, - max_connections, - timeout, - snapshot) - - file.content = file.content.decode(encoding) - return file - - def update_range(self, share_name, directory_name, file_name, data, - start_range, end_range, validate_content=False, timeout=None): - ''' - Writes the bytes specified by the request body into the specified range. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param bytes data: - Content of the range. - :param int start_range: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :param int timeout: - The timeout parameter is expressed in seconds. 
- ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - _validate_not_none('data', data) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'range', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'x-ms-write': 'update', - } - _validate_and_format_range_headers( - request, start_range, end_range) - request.body = _get_data_bytes_only('data', data) - - if validate_content: - computed_md5 = _get_content_md5(request.body) - request.headers['Content-MD5'] = _to_str(computed_md5) - - self._perform_request(request) - - def clear_range(self, share_name, directory_name, file_name, start_range, - end_range, timeout=None): - ''' - Clears the specified range and releases the space used in storage for - that range. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - End of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int timeout: - The timeout parameter is expressed in seconds. - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'range', - 'timeout': _int_to_str(timeout), - } - request.headers = { - 'Content-Length': '0', - 'x-ms-write': 'clear', - } - _validate_and_format_range_headers( - request, start_range, end_range) - - self._perform_request(request) - - def list_ranges(self, share_name, directory_name, file_name, - start_range=None, end_range=None, timeout=None, snapshot=None): - ''' - Retrieves the valid ranges for a file. - - :param str share_name: - Name of existing share. - :param str directory_name: - The path to the directory. - :param str file_name: - Name of existing file. - :param int start_range: - Specifies the start offset of bytes over which to list ranges. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int end_range: - Specifies the end offset of bytes over which to list ranges. - The start_range and end_range params are inclusive. - Ex: start_range=0, end_range=511 will download first 512 bytes of file. - :param int timeout: - The timeout parameter is expressed in seconds. - :param str snapshot: - A string that represents the snapshot version, if applicable. 
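A sketch of range-level writes as described above: each write or clear covers at most 4 MB and the offsets are inclusive, and list_ranges reports which ranges still hold data (placeholder names)::

    chunk = b'\x00' * 512

    # Overwrite bytes 0-511.
    svc.update_range('myshare', 'logs', 'sparse.bin', chunk,
                     start_range=0, end_range=511, validate_content=True)

    # Release the same bytes again.
    svc.clear_range('myshare', 'logs', 'sparse.bin', start_range=0, end_range=511)

    for r in svc.list_ranges('myshare', 'logs', 'sparse.bin'):
        print(r.start, r.end)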
- :returns: a list of valid ranges - :rtype: a list of :class:`~azure.storage.file.models.FileRange` - ''' - _validate_not_none('share_name', share_name) - _validate_not_none('file_name', file_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(share_name, directory_name, file_name) - request.query = { - 'comp': 'rangelist', - 'timeout': _int_to_str(timeout), - 'sharesnapshot': _to_str(snapshot), - } - if start_range is not None: - _validate_and_format_range_headers( - request, - start_range, - end_range, - start_range_required=False, - end_range_required=False) - - return self._perform_request(request, _convert_xml_to_ranges) diff --git a/azure/multiapi/storage/v2018_03_28/file/models.py b/azure/multiapi/storage/v2018_03_28/file/models.py deleted file mode 100644 index 0811371..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/models.py +++ /dev/null @@ -1,407 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ..common._common_conversion import _to_str - - -class Share(object): - ''' - File share class. - - :ivar str name: - The name of the share. - :ivar ShareProperties properties: - System properties for the share. - :ivar metadata: - A dict containing name-value pairs associated with the share as metadata. - This var is set to None unless the include=metadata param was included - for the list shares operation. If this parameter was specified but the - share has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict(str, str) - :ivar str snapshot: - A DateTime value that uniquely identifies the snapshot. The value of - this header indicates the snapshot version, and may be used in - subsequent requests to access the snapshot. - ''' - - def __init__(self, name=None, props=None, metadata=None, snapshot=None): - self.name = name - self.properties = props or ShareProperties() - self.metadata = metadata - self.snapshot = snapshot - - -class ShareProperties(object): - ''' - File share's properties class. - - :ivar datetime last_modified: - A datetime object representing the last time the share was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int quote: - Returns the current share quota in GB. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.quota = None - - -class Directory(object): - ''' - Directory class. - - :ivar str name: - The name of the directory. - :ivar DirectoryProperties properties: - System properties for the directory. - :ivar metadata: - A dict containing name-value pairs associated with the directory as metadata. - This var is set to None unless the include=metadata param was included - for the list directory operation. If this parameter was specified but the - directory has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict(str, str) - ''' - - def __init__(self, name=None, props=None, metadata=None): - self.name = name - self.properties = props or DirectoryProperties() - self.metadata = metadata - - -class DirectoryProperties(object): - ''' - File directory's properties class. 
- - :ivar datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool server_encrypted: - Set to true if the directory metadata is encrypted on the server. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.server_encrypted = None - - -class File(object): - ''' - File class. - - :ivar str name: - The name of the file. - :ivar content: - File content. - :vartype content: str or bytes - :ivar FileProperties properties: - System properties for the file. - :ivar metadata: - A dict containing name-value pairs associated with the file as metadata. - This var is set to None unless the include=metadata param was included - for the list file operation. If this parameter was specified but the - file has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict(str, str) - ''' - - def __init__(self, name=None, content=None, props=None, metadata=None): - self.name = name - self.content = content - self.properties = props or FileProperties() - self.metadata = metadata - - -class FileProperties(object): - ''' - File Properties. - - :ivar datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int content_length: - The length of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar ~azure.storage.file.models.ContentSettings content_settings: - Stores all the content settings for the file. - :ivar ~azure.storage.file.models.CopyProperties copy: - Stores all the copy properties for the file. - ivar bool server_encrypted: - Set to true if the file data and application metadata are completely encrypted. - ''' - - def __init__(self): - self.last_modified = None - self.etag = None - self.content_length = None - self.content_range = None - self.content_settings = ContentSettings() - self.copy = CopyProperties() - self.server_encrypted = None - - -class ContentSettings(object): - ''' - Used to store the content settings of a file. - - :ivar str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If content_encoding has previously been set - for the file, that value is stored. - :ivar str content_language: - If content_language has previously been set - for the file, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :ivar str cache_control: - If cache_control has previously been set for - the file, that value is stored. - :ivar str content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. 
- ''' - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None): - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.cache_control = cache_control - self.content_md5 = content_md5 - - def _to_headers(self): - return { - 'x-ms-cache-control': _to_str(self.cache_control), - 'x-ms-content-type': _to_str(self.content_type), - 'x-ms-content-disposition': _to_str(self.content_disposition), - 'x-ms-content-md5': _to_str(self.content_md5), - 'x-ms-content-encoding': _to_str(self.content_encoding), - 'x-ms-content-language': _to_str(self.content_language), - } - - -class CopyProperties(object): - ''' - File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation using Set File Properties or - Put File. - :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation using - Set File Properties or Put File. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - ''' - - def __init__(self): - self.id = None - self.source = None - self.status = None - self.progress = None - self.completion_time = None - self.status_description = None - - -class FileRange(object): - ''' - File Range. - - :ivar int start: - Byte index for start of file range. - :ivar int end: - Byte index for end of file range. - ''' - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class DeleteSnapshot(object): - ''' - Required if the Share has associated snapshots. Specifies how to handle the snapshots. - ''' - - Include = 'include' - ''' - Delete the share and all of its snapshots. - ''' - - -class FilePermissions(object): - ''' - FilePermissions class to be used with - :func:`~azure.storage.file.fileservice.FileService.generate_file_shared_access_signature` API. - - :ivar FilePermissions FilePermissions.CREATE: - Create a new file or copy a file to a new file. 
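The ``ContentSettings`` helper above simply carries the standard HTTP content headers for a file; its private ``_to_headers`` method maps each populated field onto the matching ``x-ms-content-*`` request header. A minimal sketch of how it was typically constructed, assuming an installed release that still ships the ``azure.multiapi.storage.v2018_03_28`` file namespace (inspecting the private ``_to_headers`` mapping here is for illustration only):

.. code-block:: python

    from azure.multiapi.storage.v2018_03_28.file.models import ContentSettings

    # Only the fields you set carry a value; everything else stays None.
    settings = ContentSettings(
        content_type='application/json',
        cache_control='max-age=3600',
        content_encoding='gzip',
    )

    # The private _to_headers() mapping shows which x-ms-content-* header
    # each field ends up on when the file properties are set.
    for header, value in settings._to_headers().items():
        print(header, value)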
- :ivar FilePermissions FilePermissions.DELETE: - Delete the file. - :ivar FilePermissions FilePermissions.READ: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :ivar FilePermissions FilePermissions.WRITE: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - ''' - - def __init__(self, read=False, create=False, write=False, delete=False, - _str=None): - ''' - :param bool read: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :param bool create: - Create a new file or copy a file to a new file. - :param bool write: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - :param bool delete: - Delete the file. - :param str _str: - A string representing the permissions. - ''' - - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.create = create or ('c' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - - def __or__(self, other): - return FilePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return FilePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - -FilePermissions.CREATE = FilePermissions(create=True) -FilePermissions.DELETE = FilePermissions(delete=True) -FilePermissions.READ = FilePermissions(read=True) -FilePermissions.WRITE = FilePermissions(write=True) - - -class SharePermissions(object): - ''' - SharePermissions class to be used with `azure.storage.file.FileService.generate_share_shared_access_signature` - method and for the AccessPolicies used with `azure.storage.file.FileService.set_share_acl`. - - :ivar SharePermissions FilePermissions.DELETE: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :ivar SharePermissions FilePermissions.LIST: - List files and directories in the share. - :ivar SharePermissions FilePermissions.READ: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :ivar SharePermissions FilePermissions.WRITE: - For any file in the share, create or write content, properties or metadata. - Resize the file. Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. - ''' - - def __init__(self, read=False, write=False, delete=False, list=False, - _str=None): - ''' - :param bool read: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :param bool write: - For any file in the share, create or write content, properties or metadata. - Resize the file. Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. - :param bool delete: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :param bool list: - List files and directories in the share. 
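``FilePermissions`` instances compose with ``|`` or ``+``, and ``str()`` renders the SAS permission string in the required read, create, write, delete order. A small sketch, again assuming the legacy ``v2018_03_28`` namespace is still importable:

.. code-block:: python

    from azure.multiapi.storage.v2018_03_28.file.models import FilePermissions

    # Combine the predefined singletons; the output string is always ordered
    # r, c, w, d regardless of how the flags were combined.
    perms = FilePermissions.READ | FilePermissions.WRITE | FilePermissions.DELETE
    print(str(perms))   # -> 'rwd'

    # The same string can be parsed back via the private _str argument.
    assert str(FilePermissions(_str='rwd')) == 'rwd'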
- :param str _str: - A string representing the permissions - ''' - - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.write = write or ('w' in _str) - self.delete = delete or ('d' in _str) - self.list = list or ('l' in _str) - - def __or__(self, other): - return SharePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return SharePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - -SharePermissions.DELETE = SharePermissions(delete=True) -SharePermissions.LIST = SharePermissions(list=True) -SharePermissions.READ = SharePermissions(read=True) -SharePermissions.WRITE = SharePermissions(write=True) diff --git a/azure/multiapi/storage/v2018_03_28/file/sharedaccesssignature.py b/azure/multiapi/storage/v2018_03_28/file/sharedaccesssignature.py deleted file mode 100644 index de52de1..0000000 --- a/azure/multiapi/storage/v2018_03_28/file/sharedaccesssignature.py +++ /dev/null @@ -1,188 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ..common.sharedaccesssignature import ( - SharedAccessSignature, - _SharedAccessHelper, -) -from ..common._common_conversion import ( - _to_str, -) -from ._constants import X_MS_VERSION - - -class FileSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating file and share access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param FilePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _to_str(directory_name) - resource_path += '/' + _to_str(file_name) - - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'file', resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param SharePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. 
- Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'file', share_name) - - return sas.get_token() diff --git a/azure/multiapi/storage/v2018_03_28/queue/__init__.py b/azure/multiapi/storage/v2018_03_28/queue/__init__.py deleted file mode 100644 index 0c64f78..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
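For reference, a hedged sketch of how ``FileSharedAccessSignature.generate_file`` was typically driven. The account name, key, share and file names below are placeholders; the key must be a base64 string, and the token is computed locally without any service call:

.. code-block:: python

    from datetime import datetime, timedelta

    from azure.multiapi.storage.v2018_03_28.file.models import FilePermissions
    from azure.multiapi.storage.v2018_03_28.file.sharedaccesssignature import (
        FileSharedAccessSignature,
    )

    # Placeholder credentials: 'bXlrZXk=' is just base64("mykey").
    sas_factory = FileSharedAccessSignature('myaccount', 'bXlrZXk=')

    token = sas_factory.generate_file(
        share_name='myshare',
        directory_name='reports',
        file_name='2018-03.csv',
        permission=FilePermissions.READ,
        expiry=datetime.utcnow() + timedelta(hours=1),  # naive datetimes are treated as UTC
    )
    print(token)   # query-string style SAS token, passed later as sas_token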
-# -------------------------------------------------------------------------- -from .models import ( - Queue, - QueueMessage, - QueuePermissions, - QueueMessageFormat, -) - -from .queueservice import QueueService diff --git a/azure/multiapi/storage/v2018_03_28/queue/_constants.py b/azure/multiapi/storage/v2018_03_28/queue/_constants.py deleted file mode 100644 index c75bf08..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/_constants.py +++ /dev/null @@ -1,11 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -__author__ = 'Microsoft Corp. ' -__version__ = '1.3.0' - -# x-ms-version for storage service. -X_MS_VERSION = '2018-03-28' diff --git a/azure/multiapi/storage/v2018_03_28/queue/_deserialization.py b/azure/multiapi/storage/v2018_03_28/queue/_deserialization.py deleted file mode 100644 index d0ef297..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/_deserialization.py +++ /dev/null @@ -1,150 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from dateutil import parser - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from .models import ( - Queue, - QueueMessage, -) -from ..common.models import ( - _list, -) -from ..common._deserialization import ( - _to_int, - _parse_metadata, -) -from ._encryption import ( - _decrypt_queue_message, -) - - -def _parse_metadata_and_message_count(response): - ''' - Extracts approximate messages count header. - ''' - metadata = _parse_metadata(response) - metadata.approximate_message_count = _to_int(response.headers.get('x-ms-approximate-messages-count')) - - return metadata - - -def _parse_queue_message_from_headers(response): - ''' - Extracts pop receipt and time next visible from headers. 
- ''' - message = QueueMessage() - message.pop_receipt = response.headers.get('x-ms-popreceipt') - message.time_next_visible = parser.parse(response.headers.get('x-ms-time-next-visible')) - - return message - - -def _convert_xml_to_queues(response): - ''' - - - string-value - string-value - int-value - - - string-value - - value - - - - - ''' - if response is None or response.body is None: - return None - - queues = _list() - list_element = ETree.fromstring(response.body) - - # Set next marker - next_marker = list_element.findtext('NextMarker') or None - setattr(queues, 'next_marker', next_marker) - - queues_element = list_element.find('Queues') - - for queue_element in queues_element.findall('Queue'): - # Name element - queue = Queue() - queue.name = queue_element.findtext('Name') - - # Metadata - metadata_root_element = queue_element.find('Metadata') - if metadata_root_element is not None: - queue.metadata = dict() - for metadata_element in metadata_root_element: - queue.metadata[metadata_element.tag] = metadata_element.text - - # Add queue to list - queues.append(queue) - - return queues - - -def _convert_xml_to_queue_messages(response, decode_function, require_encryption, key_encryption_key, resolver, - content=None): - ''' - - - - string-message-id - insertion-time - expiration-time - opaque-string-receipt-data - time-next-visible - integer - message-body - - - ''' - if response is None or response.body is None: - return None - - messages = list() - list_element = ETree.fromstring(response.body) - - for message_element in list_element.findall('QueueMessage'): - message = QueueMessage() - - message.id = message_element.findtext('MessageId') - - dequeue_count = message_element.findtext('DequeueCount') - if dequeue_count is not None: - message.dequeue_count = _to_int(dequeue_count) - - # content is not returned for put_message - if content is not None: - message.content = content - else: - message.content = message_element.findtext('MessageText') - if (key_encryption_key is not None) or (resolver is not None): - message.content = _decrypt_queue_message(message.content, require_encryption, - key_encryption_key, resolver) - message.content = decode_function(message.content) - - message.insertion_time = parser.parse(message_element.findtext('InsertionTime')) - message.expiration_time = parser.parse(message_element.findtext('ExpirationTime')) - - message.pop_receipt = message_element.findtext('PopReceipt') - - time_next_visible = message_element.find('TimeNextVisible') - if time_next_visible is not None: - message.time_next_visible = parser.parse(time_next_visible.text) - - # Add message to list - messages.append(message) - - return messages diff --git a/azure/multiapi/storage/v2018_03_28/queue/_encryption.py b/azure/multiapi/storage/v2018_03_28/queue/_encryption.py deleted file mode 100644 index 75979f3..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/_encryption.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import os -from json import ( - dumps, - loads, -) - -from azure.common import ( - AzureException, -) -from cryptography.hazmat.primitives.padding import PKCS7 - -from ..common._common_conversion import ( - _encode_base64, - _decode_base64_to_bytes -) -from ..common._encryption import ( - _generate_encryption_data_dict, - _dict_to_encryption_data, - _generate_AES_CBC_cipher, - _validate_and_unwrap_cek, - _EncryptionAlgorithm, -) -from ..common._error import ( - _ERROR_DECRYPTION_FAILURE, - _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM, - _validate_not_none, - _validate_key_encryption_key_wrap, -) -from ._error import ( - _ERROR_MESSAGE_NOT_ENCRYPTED -) - - -def _encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': _encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def _decrypt_queue_message(message, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. 
- :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = _decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError(_ERROR_MESSAGE_NOT_ENCRYPTED) - else: - return message - try: - return _decrypt(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception: - raise AzureException(_ERROR_DECRYPTION_FAILURE) - - -def _decrypt(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if not (_EncryptionAlgorithm.AES_CBC_256 == encryption_data.encryption_agent.encryption_algorithm): - raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data diff --git a/azure/multiapi/storage/v2018_03_28/queue/_error.py b/azure/multiapi/storage/v2018_03_28/queue/_error.py deleted file mode 100644 index cb15935..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/_error.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys - -from ..common._error import ( - _validate_type_bytes, -) - -_ERROR_MESSAGE_SHOULD_BE_UNICODE = 'message should be of type unicode.' -_ERROR_MESSAGE_SHOULD_BE_STR = 'message should be of type str.' -_ERROR_MESSAGE_NOT_BASE64 = 'message is not a valid base64 value.' -_ERROR_MESSAGE_NOT_ENCRYPTED = 'Message was not encrypted.' 
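The encryption helpers above never see raw key material directly; they only call the small key-encryption-key interface described in their docstrings (``wrap_key``, ``get_key_wrap_algorithm`` and ``get_kid`` for encryption, plus ``unwrap_key`` for decryption). A deliberately insecure sketch showing only the required shape of such an object:

.. code-block:: python

    class LocalKeyWrapper(object):
        """Toy key-encryption-key for illustration. The 'wrapping' below is a
        no-op and offers no protection; a real implementation would wrap the
        content encryption key with RSA-OAEP or AES key wrap."""

        def __init__(self, kid='local-kek-1'):
            self.kid = kid

        # Methods required for encryption
        def wrap_key(self, key):
            return key

        def get_key_wrap_algorithm(self):
            return 'none'

        def get_kid(self):
            return self.kid

        # Method required for decryption
        def unwrap_key(self, key, algorithm):
            return key

An object of this shape could be supplied wherever a ``key_encryption_key`` is accepted, for example as the ``key_encryption_key`` attribute on the queue client, or returned from a ``key_resolver_function(kid)``.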
- -def _validate_message_type_text(param): - if sys.version_info < (3,): - if not isinstance(param, unicode): - raise TypeError(_ERROR_MESSAGE_SHOULD_BE_UNICODE) - else: - if not isinstance(param, str): - raise TypeError(_ERROR_MESSAGE_SHOULD_BE_STR) - - -def _validate_message_type_bytes(param): - _validate_type_bytes('message', param) diff --git a/azure/multiapi/storage/v2018_03_28/queue/_serialization.py b/azure/multiapi/storage/v2018_03_28/queue/_serialization.py deleted file mode 100644 index 21569e5..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/_serialization.py +++ /dev/null @@ -1,73 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import sys - -if sys.version_info >= (3,): - from io import BytesIO -else: - try: - from cStringIO import StringIO as BytesIO - except: - from StringIO import StringIO as BytesIO - -try: - from xml.etree import cElementTree as ETree -except ImportError: - from xml.etree import ElementTree as ETree - -from ..common._common_conversion import ( - _str, -) -from ._encryption import ( - _encrypt_queue_message, -) - - -def _get_path(queue_name=None, include_messages=None, message_id=None): - ''' - Creates the path to access a queue resource. - - queue_name: - Name of queue. - include_messages: - Whether or not to include messages. - message_id: - Message id. - ''' - if queue_name and include_messages and message_id: - return '/{0}/messages/{1}'.format(_str(queue_name), message_id) - if queue_name and include_messages: - return '/{0}/messages'.format(_str(queue_name)) - elif queue_name: - return '/{0}'.format(_str(queue_name)) - else: - return '/' - - -def _convert_queue_message_xml(message_text, encode_function, key_encryption_key): - ''' - - - - - ''' - queue_message_element = ETree.Element('QueueMessage') - - # Enabled - message_text = encode_function(message_text) - if key_encryption_key is not None: - message_text = _encrypt_queue_message(message_text, key_encryption_key) - ETree.SubElement(queue_message_element, 'MessageText').text = message_text - - # Add xml declaration and serialize - try: - stream = BytesIO() - ETree.ElementTree(queue_message_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml') - output = stream.getvalue() - finally: - stream.close() - - return output diff --git a/azure/multiapi/storage/v2018_03_28/queue/models.py b/azure/multiapi/storage/v2018_03_28/queue/models.py deleted file mode 100644 index fb3932a..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/models.py +++ /dev/null @@ -1,239 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from base64 import ( - b64encode, - b64decode, -) -from xml.sax.saxutils import escape as xml_escape -from xml.sax.saxutils import unescape as xml_unescape - -from ._error import ( - _validate_message_type_bytes, - _validate_message_type_text, - _ERROR_MESSAGE_NOT_BASE64, -) - - -class Queue(object): - ''' - Queue class. - - :ivar str name: - The name of the queue. 
- :ivar metadata: - A dict containing name-value pairs associated with the queue as metadata. - This var is set to None unless the include=metadata param was included - for the list queues operation. If this parameter was specified but the - queue has no metadata, metadata will be set to an empty dictionary. - :vartype metadata: dict(str, str) - ''' - - def __init__(self): - self.name = None - self.metadata = None - - -class QueueMessage(object): - ''' - Queue message class. - - :ivar str id: - A GUID value assigned to the message by the Queue service that - identifies the message in the queue. This value may be used together - with the value of pop_receipt to delete a message from the queue after - it has been retrieved with the get messages operation. - :ivar date insertion_time: - A UTC date value representing the time the messages was inserted. - :ivar date expiration_time: - A UTC date value representing the time the message expires. - :ivar int dequeue_count: - Begins with a value of 1 the first time the message is dequeued. This - value is incremented each time the message is subsequently dequeued. - :ivar obj content: - The message content. Type is determined by the decode_function set on - the service. Default is str. - :ivar str pop_receipt: - A receipt str which can be used together with the message_id element to - delete a message from the queue after it has been retrieved with the get - messages operation. Only returned by get messages operations. Set to - None for peek messages. - :ivar date time_next_visible: - A UTC date value representing the time the message will next be visible. - Only returned by get messages operations. Set to None for peek messages. - ''' - - def __init__(self): - self.id = None - self.insertion_time = None - self.expiration_time = None - self.dequeue_count = None - self.content = None - self.pop_receipt = None - self.time_next_visible = None - - -class QueueMessageFormat: - ''' - Encoding and decoding methods which can be used to modify how the queue service - encodes and decodes queue messages. Set these to queueservice.encode_function - and queueservice.decode_function to modify the behavior. The defaults are - text_xmlencode and text_xmldecode, respectively. - ''' - - @staticmethod - def text_base64encode(data): - ''' - Base64 encode unicode text. - - :param str data: String to encode. - :return: Base64 encoded string. - :rtype: str - ''' - _validate_message_type_text(data) - return b64encode(data.encode('utf-8')).decode('utf-8') - - @staticmethod - def text_base64decode(data): - ''' - Base64 decode to unicode text. - - :param str data: String data to decode to unicode. - :return: Base64 decoded string. - :rtype: str - ''' - try: - return b64decode(data.encode('utf-8')).decode('utf-8') - except (ValueError, TypeError): - # ValueError for Python 3, TypeError for Python 2 - raise ValueError(_ERROR_MESSAGE_NOT_BASE64) - - @staticmethod - def binary_base64encode(data): - ''' - Base64 encode byte strings. - - :param str data: Binary string to encode. - :return: Base64 encoded data. - :rtype: str - ''' - _validate_message_type_bytes(data) - return b64encode(data).decode('utf-8') - - @staticmethod - def binary_base64decode(data): - ''' - Base64 decode to byte string. - - :param str data: Data to decode to a byte string. - :return: Base64 decoded data. 
- :rtype: str - ''' - try: - return b64decode(data.encode('utf-8')) - except (ValueError, TypeError): - # ValueError for Python 3, TypeError for Python 2 - raise ValueError(_ERROR_MESSAGE_NOT_BASE64) - - @staticmethod - def text_xmlencode(data): - ''' - XML encode unicode text. - - :param str data: Unicode string to encode - :return: XML encoded data. - :rtype: str - ''' - _validate_message_type_text(data) - return xml_escape(data) - - @staticmethod - def text_xmldecode(data): - ''' - XML decode to unicode text. - - :param str data: Data to decode to unicode. - :return: XML decoded data. - :rtype: str - ''' - return xml_unescape(data) - - @staticmethod - def noencode(data): - ''' - Do no encoding. - - :param str data: Data. - :return: The data passed in is returned unmodified. - :rtype: str - ''' - return data - - @staticmethod - def nodecode(data): - ''' - Do no decoding. - - :param str data: Data. - :return: The data passed in is returned unmodified. - :rtype: str - ''' - return data - - -class QueuePermissions(object): - ''' - QueuePermissions class to be used with :func:`~azure.storage.queue.queueservice.QueueService.generate_queue_shared_access_signature` - method and for the AccessPolicies used with :func:`~azure.storage.queue.queueservice.QueueService.set_queue_acl`. - - :ivar QueuePermissions QueuePermissions.READ: - Read metadata and properties, including message count. Peek at messages. - :ivar QueuePermissions QueuePermissions.ADD: - Add messages to the queue. - :ivar QueuePermissions QueuePermissions.UPDATE: - Update messages in the queue. Note: Use the Process permission with - Update so you can first get the message you want to update. - :ivar QueuePermissions QueuePermissions.PROCESS: Delete entities. - Get and delete messages from the queue. - ''' - - def __init__(self, read=False, add=False, update=False, process=False, _str=None): - ''' - :param bool read: - Read metadata and properties, including message count. Peek at messages. - :param bool add: - Add messages to the queue. - :param bool update: - Update messages in the queue. Note: Use the Process permission with - Update so you can first get the message you want to update. - :param bool process: - Get and delete messages from the queue. - :param str _str: - A string representing the permissions. - ''' - if not _str: - _str = '' - self.read = read or ('r' in _str) - self.add = add or ('a' in _str) - self.update = update or ('u' in _str) - self.process = process or ('p' in _str) - - def __or__(self, other): - return QueuePermissions(_str=str(self) + str(other)) - - def __add__(self, other): - return QueuePermissions(_str=str(self) + str(other)) - - def __str__(self): - return (('r' if self.read else '') + - ('a' if self.add else '') + - ('u' if self.update else '') + - ('p' if self.process else '')) - - -QueuePermissions.READ = QueuePermissions(read=True) -QueuePermissions.ADD = QueuePermissions(add=True) -QueuePermissions.UPDATE = QueuePermissions(update=True) -QueuePermissions.PROCESS = QueuePermissions(process=True) diff --git a/azure/multiapi/storage/v2018_03_28/queue/queueservice.py b/azure/multiapi/storage/v2018_03_28/queue/queueservice.py deleted file mode 100644 index 2ef73a6..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/queueservice.py +++ /dev/null @@ -1,1009 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
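A short sketch of the two public helpers defined above: the base64 codec pair round-trips unicode message bodies, and ``QueuePermissions`` flags compose the same way as the file and share permissions. Assumes the legacy queue package is still installed:

.. code-block:: python

    from azure.multiapi.storage.v2018_03_28.queue.models import (
        QueueMessageFormat,
        QueuePermissions,
    )

    # Base64 text helpers: encode before put_message, decode after get_messages.
    encoded = QueueMessageFormat.text_base64encode(u'hello queue')
    assert QueueMessageFormat.text_base64decode(encoded) == u'hello queue'

    # Permission flags render in the fixed r, a, u, p order.
    perms = QueuePermissions.READ | QueuePermissions.PROCESS
    print(str(perms))   # -> 'rp'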
-# -------------------------------------------------------------------------- -from azure.common import ( - AzureConflictHttpError, - AzureHttpError, -) - -from ..common._auth import ( - _StorageSASAuthentication, - _StorageSharedKeyAuthentication, -) -from ..common._common_conversion import ( - _int_to_str, - _to_str, -) -from ..common._connection import _ServiceParameters -from ..common._constants import ( - SERVICE_HOST_BASE, - DEFAULT_PROTOCOL, -) -from ..common._deserialization import ( - _convert_xml_to_service_properties, - _convert_xml_to_signed_identifiers, - _convert_xml_to_service_stats, -) -from ..common._error import ( - _dont_fail_not_exist, - _dont_fail_on_exist, - _validate_not_none, - _ERROR_CONFLICT, - _ERROR_STORAGE_MISSING_INFO, - _validate_access_policies, - _validate_encryption_required, - _validate_decryption_required, -) -from ..common._http import ( - HTTPRequest, -) -from ..common._serialization import ( - _convert_signed_identifiers_to_xml, - _convert_service_properties_to_xml, -) -from ..common._serialization import ( - _get_request_body, - _add_metadata_headers, -) -from ..common.models import ( - Services, - ListGenerator, - _OperationContext, -) -from .sharedaccesssignature import ( - QueueSharedAccessSignature, -) -from ..common.storageclient import StorageClient -from ._deserialization import ( - _convert_xml_to_queues, - _convert_xml_to_queue_messages, - _parse_queue_message_from_headers, - _parse_metadata_and_message_count, -) -from ._serialization import ( - _convert_queue_message_xml, - _get_path, -) -from .models import ( - QueueMessageFormat, -) -from ._constants import ( - X_MS_VERSION, - __version__ as package_version, -) - -_QUEUE_ALREADY_EXISTS_ERROR_CODE = 'QueueAlreadyExists' -_QUEUE_NOT_FOUND_ERROR_CODE = 'QueueNotFound' -_HTTP_RESPONSE_NO_CONTENT = 204 - - -class QueueService(StorageClient): - ''' - This is the main class managing queue resources. - - The Queue service stores messages. A queue can contain an unlimited number of - messages, each of which can be up to 64KB in size. Messages are generally added - to the end of the queue and retrieved from the front of the queue, although - first in, first out (FIFO) behavior is not guaranteed. - - :ivar function(data) encode_function: - A function used to encode queue messages. Takes as - a parameter the data passed to the put_message API and returns the encoded - message. Defaults to take text and xml encode, but bytes and other - encodings can be used. For example, base64 may be preferable for developing - across multiple Azure Storage libraries in different languages. See the - :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 and - no encoding methods as well as binary equivalents. - :ivar function(data) decode_function: - A function used to encode decode messages. Takes as - a parameter the data returned by the get_messages and peek_messages APIs and - returns the decoded message. Defaults to return text and xml decode, but - bytes and other decodings can be used. For example, base64 may be preferable - for developing across multiple Azure Storage libraries in different languages. - See the :class:`~azure.storage.queue.models.QueueMessageFormat` for xml, base64 - and no decoding methods as well as binary equivalents. - :ivar object key_encryption_key: - The key-encryption-key optionally provided by the user. If provided, will be used to - encrypt/decrypt in supported methods. 
- For methods requiring decryption, either the key_encryption_key OR the resolver must be provided. - If both are provided, the resolver will take precedence. - Must implement the following methods for APIs requiring encryption: - wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - Must implement the following methods for APIs requiring decryption: - unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid()--returns a string key id for this key-encryption-key. - :ivar function key_resolver_function(kid): - A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods. - For methods requiring decryption, either the key_encryption_key OR - the resolver must be provided. If both are provided, the resolver will take precedence. - It uses the kid string to return a key-encryption-key implementing the interface defined above. - :ivar bool require_encryption: - A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and - successfully read from the queue are/were encrypted while on the server. If this flag is set, all required - parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver. - ''' - - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, - protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, request_session=None, - connection_string=None, socket_timeout=None, token_credential=None): - ''' - :param str account_name: - The storage account name. This is used to authenticate requests - signed with an account key and to construct the storage endpoint. It - is required unless a connection string is given. - :param str account_key: - The storage account key. This is used for shared key authentication. - :param str sas_token: - A shared access signature token to use to authenticate requests - instead of the account key. If account key and sas token are both - specified, account key will be used to sign. - :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will - override all other parameters besides connection string and request - session. - :param str protocol: - The protocol to use for requests. Defaults to https. - :param str endpoint_suffix: - The host base component of the url, minus the account name. Defaults - to Azure (core.windows.net). Override this to use the China cloud - (core.chinacloudapi.cn). - :param requests.Session request_session: - The session object to use for http requests. - :param str connection_string: - If specified, this will override all other parameters besides - request session. See - http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ - for the connection string format. - :param int socket_timeout: - If specified, this will override the default socket timeout. The timeout specified is in seconds. - See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. - :param token_credential: - A token credential used to authenticate HTTPS requests. The token value - should be updated before its expiration. 
- :type `~..common.TokenCredential` - ''' - service_params = _ServiceParameters.get_service_parameters( - 'queue', - account_name=account_name, - account_key=account_key, - sas_token=sas_token, - token_credential=token_credential, - is_emulated=is_emulated, - protocol=protocol, - endpoint_suffix=endpoint_suffix, - request_session=request_session, - connection_string=connection_string, - socket_timeout=socket_timeout) - - super(QueueService, self).__init__(service_params) - - if self.account_key: - self.authentication = _StorageSharedKeyAuthentication( - self.account_name, - self.account_key, - self.is_emulated - ) - elif self.sas_token: - self.authentication = _StorageSASAuthentication(self.sas_token) - elif self.token_credential: - self.authentication = self.token_credential - else: - raise ValueError(_ERROR_STORAGE_MISSING_INFO) - - self.encode_function = QueueMessageFormat.text_xmlencode - self.decode_function = QueueMessageFormat.text_xmldecode - self.key_encryption_key = None - self.key_resolver_function = None - self.require_encryption = False - self._X_MS_VERSION = X_MS_VERSION - self._update_user_agent_string(package_version) - - def generate_account_shared_access_signature(self, resource_types, permission, - expiry, start=None, ip=None, protocol=None): - ''' - Generates a shared access signature for the queue service. - Use the returned signature with the sas_token parameter of QueueService. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param AccountPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - ''' - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = QueueSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_account(Services.QUEUE, resource_types, permission, - expiry, start=start, ip=ip, protocol=protocol) - - def generate_queue_shared_access_signature(self, queue_name, - permission=None, - expiry=None, - start=None, - id=None, - ip=None, protocol=None, ): - ''' - Generates a shared access signature for the queue. - Use the returned signature with the sas_token parameter of QueueService. - - :param str queue_name: - The name of the queue to create a SAS token for. - :param QueuePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use :func:`~set_queue_acl`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - :return: A Shared Access Signature (sas) token. - :rtype: str - ''' - _validate_not_none('queue_name', queue_name) - _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - - sas = QueueSharedAccessSignature(self.account_name, self.account_key) - return sas.generate_queue( - queue_name, - permission=permission, - expiry=expiry, - start=start, - id=id, - ip=ip, - protocol=protocol, - ) - - def get_queue_service_stats(self, timeout=None): - ''' - Retrieves statistics related to replication for the Queue service. It is - only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. 
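Putting the pieces above together, a hedged sketch of constructing the client and issuing a queue-scoped SAS. The credentials and queue name are placeholders, and both SAS calls below are pure local computation:

.. code-block:: python

    from datetime import datetime, timedelta

    from azure.multiapi.storage.v2018_03_28.queue import QueueService, QueuePermissions

    # Placeholder account; 'bXlrZXk=' is base64("mykey").
    service = QueueService(account_name='myaccount', account_key='bXlrZXk=')

    sas_token = service.generate_queue_shared_access_signature(
        'taskqueue',
        permission=QueuePermissions.READ | QueuePermissions.PROCESS,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # A consumer that only needs to read and delete messages can use the token
    # instead of the account key.
    consumer = QueueService(account_name='myaccount', sas_token=sas_token)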
- The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :param int timeout: - The timeout parameter is expressed in seconds. - :return: The queue service stats. - :rtype: :class:`~..common.models.ServiceStats` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(primary=False, secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'stats', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_stats) - - def get_queue_service_properties(self, timeout=None): - ''' - Gets the properties of a storage account's Queue service, including - logging, analytics and CORS rules. - - :param int timeout: - The server timeout, expressed in seconds. - :return: The queue service properties. - :rtype: :class:`~..common.models.ServiceProperties` - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_service_properties) - - def set_queue_service_properties(self, logging=None, hour_metrics=None, - minute_metrics=None, cors=None, timeout=None): - ''' - Sets the properties of a storage account's Queue service, including - Azure Storage Analytics. If an element (ex Logging) is left as None, the - existing settings on the service for that functionality are preserved. - For more information on Azure Storage Analytics, see - https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx. - - :param Logging logging: - The logging settings provide request logs. - :param Metrics hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for queuess. - :param Metrics minute_metrics: - The minute metrics settings provide request statistics - for each minute for queues. - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. For detailed information - about CORS rules and evaluation logic, see - https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx. - :type cors: list(:class:`~..common.models.CorsRule`) - :param int timeout: - The server timeout, expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path() - request.query = { - 'restype': 'service', - 'comp': 'properties', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors)) - self._perform_request(request) - - def list_queues(self, prefix=None, num_results=None, include_metadata=False, - marker=None, timeout=None): - ''' - Returns a generator to list the queues. 
The generator will lazily follow - the continuation tokens returned by the service and stop when all queues - have been returned or num_results is reached. - - If num_results is specified and the account has more than that number of - queues, the generator will have a populated next_marker field once it - finishes. This marker can be used to create a new generator if more - results are desired. - - :param str prefix: - Filters the results to return only queues with names that begin - with the specified prefix. - :param int num_results: - The maximum number of queues to return. - :param bool include_metadata: - Specifies that queue metadata be returned in the response. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object if num_results was - specified and that generator has finished enumerating results. If - specified, this generator will begin returning results from the point - where the previous generator stopped. - :param int timeout: - The server timeout, expressed in seconds. This function may make multiple - calls to the service in which case the timeout value specified will be - applied to each individual call. - ''' - include = 'metadata' if include_metadata else None - operation_context = _OperationContext(location_lock=True) - kwargs = {'prefix': prefix, 'max_results': num_results, 'include': include, - 'marker': marker, 'timeout': timeout, '_context': operation_context} - resp = self._list_queues(**kwargs) - - return ListGenerator(resp, self._list_queues, (), kwargs) - - def _list_queues(self, prefix=None, marker=None, max_results=None, - include=None, timeout=None, _context=None): - ''' - Returns a list of queues under the specified account. Makes a single list - request to the service. Used internally by the list_queues method. - - :param str prefix: - Filters the results to return only queues with names that begin - with the specified prefix. - :param str marker: - A token which identifies the portion of the query to be - returned with the next query operation. The operation returns a - next_marker element within the response body if the list returned - was not complete. This value may then be used as a query parameter - in a subsequent call to request the next portion of the list of - queues. The marker value is opaque to the client. - :param int max_results: - The maximum number of queues to return. A single list request may - return up to 1000 queues and potentially a continuation token which - should be followed to get additional results. - :param str include: - Include this parameter to specify that the queue's - metadata be returned as part of the response body. - :param int timeout: - The server timeout, expressed in seconds. - ''' - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path() - request.query = { - 'comp': 'list', - 'prefix': _to_str(prefix), - 'marker': _to_str(marker), - 'maxresults': _int_to_str(max_results), - 'include': _to_str(include), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queues, operation_context=_context) - - def create_queue(self, queue_name, metadata=None, fail_on_exist=False, timeout=None): - ''' - Creates a queue under the given account. - - :param str queue_name: - The name of the queue to create.
A queue name must be from 3 through - 63 characters long and may only contain lowercase letters, numbers, - and the dash (-) character. The first and last letters in the queue - must be alphanumeric. The dash (-) character cannot be the first or - last character. Consecutive dash characters are not permitted in the - queue name. - :param metadata: - A dict containing name-value pairs to associate with the queue as - metadata. Note that metadata names preserve the case with which they - were created, but are case-insensitive when set or read. - :type metadata: dict(str, str) - :param bool fail_on_exist: - Specifies whether to throw an exception if the queue already exists. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A boolean indicating whether the queue was created. If fail_on_exist - was set to True, this will throw instead of returning false. - :rtype: bool - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = {'timeout': _int_to_str(timeout)} - _add_metadata_headers(metadata, request) - - def _return_request(request): - return request - - if not fail_on_exist: - try: - response = self._perform_request(request, parser=_return_request, - expected_errors=[_QUEUE_ALREADY_EXISTS_ERROR_CODE]) - if response.status == _HTTP_RESPONSE_NO_CONTENT: - return False - return True - except AzureHttpError as ex: - _dont_fail_on_exist(ex) - return False - else: - response = self._perform_request(request, parser=_return_request) - if response.status == _HTTP_RESPONSE_NO_CONTENT: - raise AzureConflictHttpError( - _ERROR_CONFLICT.format(response.message), response.status) - return True - - def delete_queue(self, queue_name, fail_not_exist=False, timeout=None): - ''' - Deletes the specified queue and any messages it contains. - - When a queue is successfully deleted, it is immediately marked for deletion - and is no longer accessible to clients. The queue is later removed from - the Queue service during garbage collection. - - Note that deleting a queue is likely to take at least 40 seconds to complete. - If an operation is attempted against the queue while it was being deleted, - an :class:`AzureConflictHttpError` will be thrown. - - :param str queue_name: - The name of the queue to delete. - :param bool fail_not_exist: - Specifies whether to throw an exception if the queue doesn't exist. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A boolean indicating whether the queue was deleted. If fail_not_exist - was set to True, this will throw instead of returning false. - :rtype: bool - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = {'timeout': _int_to_str(timeout)} - if not fail_not_exist: - try: - self._perform_request(request, expected_errors=[_QUEUE_NOT_FOUND_ERROR_CODE]) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - else: - self._perform_request(request) - return True - - def get_queue_metadata(self, queue_name, timeout=None): - ''' - Retrieves user-defined metadata and queue properties on the specified - queue. Metadata is associated with the queue as name-value pairs. - - :param str queue_name: - The name of an existing queue. 
- :param int timeout: - The server timeout, expressed in seconds. - :return: - A dictionary representing the queue metadata with an - approximate_message_count int property on the dict estimating the - number of messages in the queue. - :rtype: dict(str, str) - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _parse_metadata_and_message_count) - - def set_queue_metadata(self, queue_name, metadata=None, timeout=None): - ''' - Sets user-defined metadata on the specified queue. Metadata is - associated with the queue as name-value pairs. - - :param str queue_name: - The name of an existing queue. - :param dict metadata: - A dict containing name-value pairs to associate with the - queue as metadata. - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - _add_metadata_headers(metadata, request) - - self._perform_request(request) - - def exists(self, queue_name, timeout=None): - ''' - Returns a boolean indicating whether the queue exists. - - :param str queue_name: - The name of queue to check for existence. - :param int timeout: - The server timeout, expressed in seconds. - :return: A boolean indicating whether the queue exists. - :rtype: bool - ''' - _validate_not_none('queue_name', queue_name) - - try: - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name) - request.query = { - 'comp': 'metadata', - 'timeout': _int_to_str(timeout), - } - - self._perform_request(request, expected_errors=[_QUEUE_NOT_FOUND_ERROR_CODE]) - return True - except AzureHttpError as ex: - _dont_fail_not_exist(ex) - return False - - def get_queue_acl(self, queue_name, timeout=None): - ''' - Returns details about any stored access policies specified on the - queue that may be used with Shared Access Signatures. - - :param str queue_name: - The name of an existing queue. - :param int timeout: - The server timeout, expressed in seconds. - :return: A dictionary of access policies associated with the queue. - :rtype: dict(str, :class:`~..common.models.AccessPolicy`) - ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name) - request.query = { - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - - return self._perform_request(request, _convert_xml_to_signed_identifiers) - - def set_queue_acl(self, queue_name, signed_identifiers=None, timeout=None): - ''' - Sets stored access policies for the queue that may be used with Shared - Access Signatures. - - When you set permissions for a queue, the existing permissions are replaced. - To update the queue's permissions, call :func:`~get_queue_acl` to fetch - all access policies associated with the queue, modify the access policy - that you wish to change, and then call this function with the complete - set of data to perform the update. 
- - When you establish a stored access policy on a queue, it may take up to - 30 seconds to take effect. During this interval, a shared access signature - that is associated with the stored access policy will throw an - :class:`AzureHttpError` until the access policy becomes active. - - :param str queue_name: - The name of an existing queue. - :param signed_identifiers: - A dictionary of access policies to associate with the queue. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~..common.models.AccessPolicy`) - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - _validate_access_policies(signed_identifiers) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name) - request.query = { - 'comp': 'acl', - 'timeout': _int_to_str(timeout), - } - request.body = _get_request_body( - _convert_signed_identifiers_to_xml(signed_identifiers)) - self._perform_request(request) - - def put_message(self, queue_name, content, visibility_timeout=None, - time_to_live=None, timeout=None): - ''' - Adds a new message to the back of the message queue. - - The visibility timeout specifies the time that the message will be - invisible. After the timeout expires, the message will become visible. - If a visibility timeout is not specified, the default value of 0 is used. - - The message time-to-live specifies how long a message will remain in the - queue. The message will be deleted from the queue when the time-to-live - period expires. - - If the key-encryption-key field is set on the local service object, this method will - encrypt the content before uploading. - - :param str queue_name: - The name of the queue to put the message into. - :param obj content: - Message content. Allowed type is determined by the encode_function - set on the service. Default is str. The encoded message can be up to - 64KB in size. - :param int visibility_timeout: - If not specified, the default value is 0. Specifies the - new visibility timeout value, in seconds, relative to server time. - The value must be larger than or equal to 0, and cannot be - larger than 7 days. The visibility timeout of a message cannot be - set to a value later than the expiry time. visibility_timeout - should be set to a value smaller than the time-to-live value. - :param int time_to_live: - Specifies the time-to-live interval for the message, in - seconds. The time-to-live may be any positive number or -1 for infinity. If this - parameter is omitted, the default time-to-live is 7 days. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A :class:`~azure.storage.queue.models.QueueMessage` object. - This object is also populated with the content although it is not - returned from the service. 
- :rtype: :class:`~azure.storage.queue.models.QueueMessage` - ''' - - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - _validate_not_none('queue_name', queue_name) - _validate_not_none('content', content) - request = HTTPRequest() - request.method = 'POST' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = { - 'visibilitytimeout': _to_str(visibility_timeout), - 'messagettl': _to_str(time_to_live), - 'timeout': _int_to_str(timeout) - } - - request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function, - self.key_encryption_key)) - - message_list = self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, False, - None, None, content]) - return message_list[0] - - def get_messages(self, queue_name, num_messages=None, - visibility_timeout=None, timeout=None): - ''' - Retrieves one or more messages from the front of the queue. - - When a message is retrieved from the queue, the response includes the message - content and a pop_receipt value, which is required to delete the message. - The message is not automatically deleted from the queue, but after it has - been retrieved, it is not visible to other clients for the time interval - specified by the visibility_timeout parameter. - - If the key-encryption-key or resolver field is set on the local service object, the messages will be - decrypted before being returned. - - :param str queue_name: - The name of the queue to get messages from. - :param int num_messages: - A nonzero integer value that specifies the number of - messages to retrieve from the queue, up to a maximum of 32. If - fewer are visible, the visible messages are returned. By default, - a single message is retrieved from the queue with this operation. - :param int visibility_timeout: - Specifies the new visibility timeout value, in seconds, relative - to server time. The new value must be larger than or equal to 1 - second, and cannot be larger than 7 days. The visibility timeout of - a message can be set to a value later than the expiry time. - :param int timeout: - The server timeout, expressed in seconds. - :return: A :class:`~azure.storage.queue.models.QueueMessage` object representing the information passed. - :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`) - ''' - _validate_decryption_required(self.require_encryption, self.key_encryption_key, - self.key_resolver_function) - - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = { - 'numofmessages': _to_str(num_messages), - 'visibilitytimeout': _to_str(visibility_timeout), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, self.require_encryption, - self.key_encryption_key, self.key_resolver_function]) - - def peek_messages(self, queue_name, num_messages=None, timeout=None): - ''' - Retrieves one or more messages from the front of the queue, but does - not alter the visibility of the message. - - Only messages that are visible may be retrieved. When a message is retrieved - for the first time with a call to get_messages, its dequeue_count property - is set to 1. If it is not deleted and is subsequently retrieved again, the - dequeue_count property is incremented. 
The client may use this value to - determine how many times a message has been retrieved. Note that a call - to peek_messages does not increment the value of DequeueCount, but returns - this value for the client to read. - - If the key-encryption-key or resolver field is set on the local service object, the messages will be - decrypted before being returned. - - :param str queue_name: - The name of the queue to peek messages from. - :param int num_messages: - A nonzero integer value that specifies the number of - messages to peek from the queue, up to a maximum of 32. By default, - a single message is peeked from the queue with this operation. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A list of :class:`~azure.storage.queue.models.QueueMessage` objects. Note that - time_next_visible and pop_receipt will not be populated as peek does - not pop the message and can only retrieve already visible messages. - :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`) - ''' - - _validate_decryption_required(self.require_encryption, self.key_encryption_key, - self.key_resolver_function) - - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'GET' - request.host_locations = self._get_host_locations(secondary=True) - request.path = _get_path(queue_name, True) - request.query = { - 'peekonly': 'true', - 'numofmessages': _to_str(num_messages), - 'timeout': _int_to_str(timeout) - } - - return self._perform_request(request, _convert_xml_to_queue_messages, - [self.decode_function, self.require_encryption, - self.key_encryption_key, self.key_resolver_function]) - - def delete_message(self, queue_name, message_id, pop_receipt, timeout=None): - ''' - Deletes the specified message. - - Normally after a client retrieves a message with the get_messages operation, - the client is expected to process and delete the message. To delete the - message, you must have two items of data: id and pop_receipt. The - id is returned from the previous get_messages operation. The - pop_receipt is returned from the most recent :func:`~get_messages` or - :func:`~update_message` operation. In order for the delete_message operation - to succeed, the pop_receipt specified on the request must match the - pop_receipt returned from the :func:`~get_messages` or :func:`~update_message` - operation. - - :param str queue_name: - The name of the queue from which to delete the message. - :param str message_id: - The message id identifying the message to delete. - :param str pop_receipt: - A valid pop receipt value returned from an earlier call - to the :func:`~get_messages` or :func:`~update_message`. - :param int timeout: - The server timeout, expressed in seconds. - ''' - _validate_not_none('queue_name', queue_name) - _validate_not_none('message_id', message_id) - _validate_not_none('pop_receipt', pop_receipt) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True, message_id) - request.query = { - 'popreceipt': _to_str(pop_receipt), - 'timeout': _int_to_str(timeout) - } - self._perform_request(request) - - def clear_messages(self, queue_name, timeout=None): - ''' - Deletes all messages from the specified queue. - - :param str queue_name: - The name of the queue whose messages to clear. - :param int timeout: - The server timeout, expressed in seconds. 
- ''' - _validate_not_none('queue_name', queue_name) - request = HTTPRequest() - request.method = 'DELETE' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True) - request.query = {'timeout': _int_to_str(timeout)} - self._perform_request(request) - - def update_message(self, queue_name, message_id, pop_receipt, visibility_timeout, - content=None, timeout=None): - ''' - Updates the visibility timeout of a message. You can also use this - operation to update the contents of a message. - - This operation can be used to continually extend the invisibility of a - queue message. This functionality can be useful if you want a worker role - to "lease" a queue message. For example, if a worker role calls get_messages - and recognizes that it needs more time to process a message, it can - continually extend the message's invisibility until it is processed. If - the worker role were to fail during processing, eventually the message - would become visible again and another worker role could process it. - - If the key-encryption-key field is set on the local service object, this method will - encrypt the content before uploading. - - :param str queue_name: - The name of the queue containing the message to update. - :param str message_id: - The message id identifying the message to update. - :param str pop_receipt: - A valid pop receipt value returned from an earlier call - to the :func:`~get_messages` or :func:`~update_message` operation. - :param int visibility_timeout: - Specifies the new visibility timeout value, in seconds, - relative to server time. The new value must be larger than or equal - to 0, and cannot be larger than 7 days. The visibility timeout of a - message cannot be set to a value later than the expiry time. A - message can be updated until it has been deleted or has expired. - :param obj content: - Message content. Allowed type is determined by the encode_function - set on the service. Default is str. - :param int timeout: - The server timeout, expressed in seconds. - :return: - A list of :class:`~azure.storage.queue.models.QueueMessage` objects. For convenience, - this object is also populated with the content, although it is not returned by the service. 
- :rtype: list(:class:`~azure.storage.queue.models.QueueMessage`) - ''' - - _validate_encryption_required(self.require_encryption, self.key_encryption_key) - - _validate_not_none('queue_name', queue_name) - _validate_not_none('message_id', message_id) - _validate_not_none('pop_receipt', pop_receipt) - _validate_not_none('visibility_timeout', visibility_timeout) - request = HTTPRequest() - request.method = 'PUT' - request.host_locations = self._get_host_locations() - request.path = _get_path(queue_name, True, message_id) - request.query = { - 'popreceipt': _to_str(pop_receipt), - 'visibilitytimeout': _int_to_str(visibility_timeout), - 'timeout': _int_to_str(timeout) - } - - if content is not None: - request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function, - self.key_encryption_key)) - - return self._perform_request(request, _parse_queue_message_from_headers) diff --git a/azure/multiapi/storage/v2018_03_28/queue/sharedaccesssignature.py b/azure/multiapi/storage/v2018_03_28/queue/sharedaccesssignature.py deleted file mode 100644 index 1cf585a..0000000 --- a/azure/multiapi/storage/v2018_03_28/queue/sharedaccesssignature.py +++ /dev/null @@ -1,81 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ..common.sharedaccesssignature import ( - SharedAccessSignature, - _SharedAccessHelper, -) -from ._constants import X_MS_VERSION - - -class QueueSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating queue shared access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shared access signatures. - ''' - super(QueueSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_queue(self, queue_name, permission=None, - expiry=None, start=None, id=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the queue. - Use the returned signature with the sas_token parameter of QueueService. - - :param str queue_name: - Name of queue. - :param QueuePermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, add, update, process. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid.
If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(id) - sas.add_resource_signature(self.account_name, self.account_key, 'queue', queue_name) - - return sas.get_token() diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/__init__.py b/azure/multiapi/storagev2/blob/v2019_02_02/__init__.py deleted file mode 100644 index 3a21f3e..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/__init__.py +++ /dev/null @@ -1,208 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os - -from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import -from ._version import VERSION -from ._blob_client import BlobClient -from ._container_client import ContainerClient -from ._blob_service_client import BlobServiceClient -from ._lease import BlobLeaseClient -from ._download import StorageStreamDownloader -from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.response_handlers import PartialBatchErrorException -from ._shared.models import( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode, - UserDelegationKey -) -from ._generated.models import ( - RehydratePriority -) -from ._models import ( - BlobType, - BlockState, - StandardBlobTier, - PremiumPageBlobTier, - SequenceNumberAction, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - ContainerProperties, - BlobProperties, - LeaseProperties, - ContentSettings, - CopyProperties, - BlobBlock, - PageRange, - AccessPolicy, - ContainerSasPermissions, - BlobSasPermissions, - CustomerProvidedEncryptionKey, -) - -__version__ = VERSION - - -def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> Dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. 
- :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = client.download_blob(**kwargs) - stream.readinto(handle) - - -def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream. - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. 
This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobType', - 'BlobLeaseClient', - 'StorageErrorCode', - 'UserDelegationKey', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'BlockState', - 'StandardBlobTier', - 'PremiumPageBlobTier', - 'SequenceNumberAction', - 'PublicAccess', - 'BlobAnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'ContainerProperties', - 'BlobProperties', - 'LeaseProperties', - 'ContentSettings', - 'CopyProperties', - 'BlobBlock', - 'PageRange', - 'AccessPolicy', - 'ContainerSasPermissions', - 'BlobSasPermissions', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageStreamDownloader', - 'CustomerProvidedEncryptionKey', - 'RehydratePriority', - 'generate_account_sas', - 'generate_container_sas', - 'generate_blob_sas', - 'PartialBatchErrorException' -] diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_blob_client.py b/azure/multiapi/storagev2/blob/v2019_02_02/_blob_client.py deleted file mode 100644 index 243a847..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_blob_client.py +++ /dev/null @@ -1,2809 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines,no-self-use - -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.tracing.decorator import distributed_trace - -from ._shared import encode_base64 -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.encryption import generate_blob_encryption_data -from ._shared.uploads import IterStreamer -from ._shared.request_handlers import ( - add_metadata_headers, get_length, read_length, - validate_and_format_range_headers) -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated import AzureBlobStorage -from ._generated.models import ( # pylint: disable=unused-import - DeleteSnapshotsOptionType, - BlobHTTPHeaders, - BlockLookupList, - AppendPositionAccessConditions, - SequenceNumberAccessConditions, - StorageErrorException, - UserDelegationKey, - CpkInfo) -from ._serialize import get_modify_conditions, get_source_conditions -from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from ._models import BlobType, BlobBlock -from ._download import StorageStreamDownloader -from ._lease import BlobLeaseClient, get_access_conditions - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.models import BlockList - from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobSasPermissions, - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. 
- :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - if not (container_name and blob_name): - raise ValueError("Please specify a container name and blob name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - path_snapshot, sas_token = parse_query(parsed_url.query) - - self.container_name = container_name - self.blob_name = blob_name - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) - super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - quote(self.blob_name, safe='~'), - self._query_str) - - @classmethod - def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): - # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient - """Create BlobClient from a blob url. - - :param str blob_url: - The full endpoint URL to the Blob, including SAS token and snapshot if used. 
This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type blob_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. If specified, this will override - the snapshot in the url. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - """ - try: - if not blob_url.lower().startswith('http'): - blob_url = "https://" + blob_url - except AttributeError: - raise ValueError("Blob URL must be a string.") - parsed_url = urlparse(blob_url.rstrip('/')) - - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(blob_url)) - - path_blob = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(path_blob) > 2: - account_path = "/" + "/".join(path_blob[:-2]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) - if not container_name or not blob_name: - raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.") - - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=path_snapshot, credential=credential, **kwargs - ) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobClient - """Create BlobClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_blob] - :end-before: [END auth_from_connection_string_blob] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a connection string. 
- """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=snapshot, credential=credential, **kwargs - ) - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_blob_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - encryption_options = { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function, - } - if self.key_encryption_key is not None: - cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) - encryption_options['cek'] = cek - encryption_options['vector'] = iv - encryption_options['data'] = encryption_data - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - overwrite = kwargs.pop('overwrite', False) - max_concurrency = kwargs.pop('max_concurrency', 1) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - kwargs['cpk_info'] = cpk_info - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) - if content_settings: - kwargs['blob_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - kwargs['stream'] = 
stream - kwargs['length'] = length - kwargs['overwrite'] = overwrite - kwargs['headers'] = headers - kwargs['validate_content'] = validate_content - kwargs['blob_settings'] = self._config - kwargs['max_concurrency'] = max_concurrency - kwargs['encryption_options'] = encryption_options - if blob_type == BlobType.BlockBlob: - kwargs['client'] = self._client.block_blob - kwargs['data'] = data - elif blob_type == BlobType.PageBlob: - kwargs['client'] = self._client.page_blob - elif blob_type == BlobType.AppendBlob: - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - kwargs['client'] = self._client.append_blob - else: - raise ValueError("Unsupported BlobType: {}".format(blob_type)) - return kwargs - - @distributed_trace - def upload_blob( # pylint: disable=too-many-locals - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 12 - :caption: Upload a blob to the container. 
- """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return upload_page_blob(**options) - return upload_append_blob(**options) - - def _download_blob_options(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Service actually uses an end-range inclusive index - - validate_content = kwargs.pop('validate_content', False) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'clients': self._client, - 'config': self._config, - 'start_range': offset, - 'end_range': length, - 'validate_content': validate_content, - 'encryption_options': { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function}, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': deserialize_blob_stream, - 'max_concurrency':kwargs.pop('max_concurrency', 1), - 'encoding': kwargs.pop('encoding', None), - 'timeout': kwargs.pop('timeout', None), - 'name': self.blob_name, - 'container': self.container_name} - options.update(kwargs) - return options - - @distributed_trace - def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> Iterable[bytes] - """Downloads a blob to a stream with automatic chunking. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A iterable data generator (stream) - :rtype: ~azure.storage.blob.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 12 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - return StorageStreamDownloader(**options) - - @staticmethod - def _generic_delete_blob_options(delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if delete_snapshots: - delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) - options = { - 'timeout': kwargs.pop('timeout', None), - 'delete_snapshots': delete_snapshots or None, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions} - options.update(kwargs) - return options - - def _delete_blob_options(self, delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - if self.snapshot and delete_snapshots: - raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") - options = self._generic_delete_blob_options(delete_snapshots, **kwargs) - options['snapshot'] = self.snapshot - return options - - @distributed_trace - def delete_blob(self, delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. 
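A matching, hypothetical download sketch: ``blob_client`` is the illustrative client from the upload sketch above, and the loop relies on the return value being the iterable stream described in the ``:returns:`` note.

.. code-block:: python

    # Hypothetical sketch: blob_client as constructed in the upload example.
    downloader = blob_client.download_blob(validate_content=False)
    with open("report-copy.txt", "wb") as handle:
        for chunk in downloader:  # documented above as an iterable data generator
            handle.write(chunk)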
- - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 12 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - self._client.blob.delete(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def undelete_blob(self, **kwargs): - # type: (**Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 8 - :caption: Undeleting a blob. - """ - try: - self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_blob_properties(self, **kwargs): - # type: (**Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. 
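The delete/undelete pair described above, again as a hypothetical sketch; ``undelete_blob`` only succeeds while a delete retention policy is enabled on the account.

.. code-block:: python

    # Hypothetical sketch: remove the blob together with its snapshots, then restore it.
    blob_client.delete_blob(delete_snapshots="include")
    blob_client.undelete_blob()  # requires soft delete (delete retention) to be enabled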
- - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a blob. 
- """ - # TODO: extract this out as _get_blob_properties_options - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - blob_props = self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - blob_props.name = self.blob_name - blob_props.container = self.container_name - return blob_props # type: ignore - - def _set_http_headers_options(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - options = { - 'timeout': kwargs.pop('timeout', None), - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return self._client.blob.set_http_headers(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _set_blob_metadata_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'cpk_info': cpk_info, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return self._client.blob.set_metadata(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_page_blob_options( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - sequence_number = kwargs.pop('sequence_number', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - if premium_page_blob_tier: - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore - options = { - 'content_length': 0, - 'blob_content_length': size, - 'blob_sequence_number': sequence_number, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'cpk_info': cpk_info, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. 
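A hypothetical sketch tying together ``get_blob_properties``, ``set_http_headers`` and ``set_blob_metadata`` from the preceding methods; ``ContentSettings`` is assumed to be exported alongside ``BlobClient``, and every value is a placeholder.

.. code-block:: python

    from azure.storage.blob import ContentSettings  # assumed import, see note above

    props = blob_client.get_blob_properties()
    print(props.name, props.container)

    # set_http_headers overrides every content setting, so restate values to keep them.
    blob_client.set_http_headers(content_settings=ContentSettings(
        content_type="text/plain", cache_control="max-age=3600"))

    # Each call replaces the blob's entire metadata set.
    blob_client.set_blob_metadata({"category": "reports", "owner": "example"})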
- :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return self._client.page_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'cpk_info': cpk_info, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.append_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_snapshot_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'cpk_info': cpk_info, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 8 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return self._client.blob.create_snapshot(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - if 'source_lease' in kwargs: - source_lease = kwargs.pop('source_lease') - try: - headers['x-ms-source-lease-id'] = source_lease.id # type: str - except AttributeError: - headers['x-ms-source-lease-id'] = source_lease - - tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) - - if kwargs.get('requires_sync'): - headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync')) - - timeout = kwargs.pop('timeout', None) - dest_mod_conditions = get_modify_conditions(kwargs) - options = { - 'copy_source': source_url, - 'timeout': timeout, - 'modified_access_conditions': dest_mod_conditions, - 'headers': headers, - 'cls': return_response_headers, - } - if not incremental_copy: - source_mod_conditions = get_source_conditions(kwargs) - dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) - options['source_modified_access_conditions'] = source_mod_conditions - options['lease_access_conditions'] = dest_access_conditions - options['tier'] = tier.value if tier else None - options.update(kwargs) - return options - - @distributed_trace - def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. 
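For the creation APIs above (``create_page_blob``, ``create_append_blob``, ``create_snapshot``), a hypothetical sketch; ``page_client`` and ``append_client`` are illustrative ``BlobClient`` instances pointing at blobs that do not exist yet.

.. code-block:: python

    # Hypothetical sketch: provision an empty page blob and an append blob.
    page_client.create_page_blob(size=1024 * 1024)  # size must be 512-byte aligned
    append_client.create_append_blob(metadata={"kind": "log"})

    # Snapshots are read-only, point-in-time copies of the base blob.
    snapshot_props = append_client.create_snapshot()
    print(snapshot_props)  # snapshot ID, ETag and last-modified values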
- - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. 
- :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Copy a blob from a URL. 
- """ - options = self._start_copy_from_url_options( - source_url, - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return self._client.page_blob.copy_incremental(**options) - return self._client.blob.start_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _abort_copy_options(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - options = { - 'copy_id': copy_id, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID string, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - self._client.blob.abort_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace - def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - try: - self._client.blob.set_tier( - tier=standard_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def _stage_block_options( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_id = encode_base64(str(block_id)) - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'block_id': block_id, - 'content_length': length, - 'body': data, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'validate_content': validate_content, - 'cpk_info': cpk_info - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
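Leasing and re-tiering, per ``acquire_lease`` and ``set_standard_blob_tier`` above, as a hypothetical sketch; the ``release()`` call on the returned ``BlobLeaseClient`` is assumed from the lease client defined elsewhere in the package.

.. code-block:: python

    # Hypothetical sketch: take an infinite lease, move the blob to the Cool tier, release.
    lease = blob_client.acquire_lease()  # default -1 duration means an infinite lease
    blob_client.set_standard_blob_tier("Cool", lease=lease)
    lease.release()  # assumed BlobLeaseClient method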
- :rtype: None - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - self._client.block_blob.stage_block(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _stage_block_from_url_options( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if source_length is not None and source_offset is None: - raise ValueError("Source offset value must not be None if length is set.") - if source_length is not None: - source_length = source_offset + source_length - 1 - block_id = encode_base64(str(block_id)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - range_header = None - if source_offset is not None: - range_header, _ = validate_and_format_range_headers(source_offset, source_length) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'block_id': block_id, - 'content_length': 0, - 'source_url': source_url, - 'source_range': range_header, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'cls': return_response_headers, - 'cpk_info': cpk_info - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - options = self._stage_block_from_url_options( - block_id, - source_url, - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - self._client.block_blob.stage_block_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _get_block_list_result(self, blocks): - # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] - committed = [] # type: List - uncommitted = [] # type: List - if blocks.committed_blocks: - committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access - if blocks.uncommitted_blocks: - uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access - return committed, uncommitted - - @distributed_trace - def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - blocks = self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - def _commit_block_list_options( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - for block in block_list: - try: - if block.state.value == 'committed': - block_lookup.committed.append(encode_base64(str(block.id))) - elif block.state.value == 'uncommitted': - block_lookup.uncommitted.append(encode_base64(str(block.id))) - else: - block_lookup.latest.append(encode_base64(str(block.id))) - except AttributeError: - block_lookup.latest.append(encode_base64(str(block))) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - blob_headers = None - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - tier = kwargs.pop('standard_blob_tier', None) - - options = { - 'blocks': block_lookup, - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'validate_content': validate_content, - 'cpk_info': cpk_info, - 'tier': tier.value if tier else None - } - options.update(kwargs) - return options - - @distributed_trace - def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of Blockblobs. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.block_blob.commit_block_list(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def _get_page_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Reformat to an inclusive range index - page_range, _ = validate_and_format_range_headers( - offset, length, start_range_required=False, end_range_required=False, align_to_page=True - ) - options = { - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': page_range} - if previous_snapshot_diff: - try: - options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore - except AttributeError: - try: - options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore - except TypeError: - options['prevsnapshot'] = previous_snapshot_diff - options.update(kwargs) - return options - - @distributed_trace - def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
-            The first element is the list of filled page ranges, the second element is the list of cleared page ranges.
-        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
-        """
-        options = self._get_page_ranges_options(
-            offset=offset,
-            length=length,
-            previous_snapshot_diff=previous_snapshot_diff,
-            **kwargs)
-        try:
-            if previous_snapshot_diff:
-                ranges = self._client.page_blob.get_page_ranges_diff(**options)
-            else:
-                ranges = self._client.page_blob.get_page_ranges(**options)
-        except StorageErrorException as error:
-            process_storage_error(error)
-        return get_page_ranges_result(ranges)
-
-    def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs):
-        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any]
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        if sequence_number_action is None:
-            raise ValueError("A sequence number action must be specified")
-        options = {
-            'sequence_number_action': sequence_number_action,
-            'timeout': kwargs.pop('timeout', None),
-            'blob_sequence_number': sequence_number,
-            'lease_access_conditions': access_conditions,
-            'modified_access_conditions': mod_conditions,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs):
-        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]]
-        """Sets the blob sequence number.
-
-        :param str sequence_number_action:
-            This property indicates how the service should modify the blob's sequence
-            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
-        :param str sequence_number:
-            This property sets the blob's sequence number. The sequence number is a
-            user-controlled property that you can use to track requests and manage
-            concurrency issues.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return self._client.page_blob.update_sequence_number(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _resize_blob_options(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if size is None: - raise ValueError("A content length must be specified for a Page Blob.") - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'blob_content_length': size, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def resize_blob(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return self._client.page_blob.resize(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_page_options( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - if isinstance(page, six.text_type): - page = page.encode(kwargs.pop('encoding', 'UTF-8')) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': page[:length], - 'content_length': length, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. 
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return self._client.page_blob.upload_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_pages_from_url_options( # type: ignore - self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) 
-> Dict[str, Any]
-        if self.require_encryption or (self.key_encryption_key is not None):
-            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
-
-        # TODO: extract the code to a method format_range
-        if offset is None or offset % 512 != 0:
-            raise ValueError("offset must be an integer that aligns with 512 page size")
-        if length is None or length % 512 != 0:
-            raise ValueError("length must be an integer that aligns with 512 page size")
-        if source_offset is None or source_offset % 512 != 0:
-            raise ValueError("source_offset must be an integer that aligns with 512 page size")
-
-        # Format range
-        end_range = offset + length - 1
-        destination_range = 'bytes={0}-{1}'.format(offset, end_range)
-        source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)  # inclusive end of the source range
-
-        seq_conditions = SequenceNumberAccessConditions(
-            if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
-            if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
-            if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
-        )
-        access_conditions = get_access_conditions(kwargs.pop('lease', None))
-        mod_conditions = get_modify_conditions(kwargs)
-        source_mod_conditions = get_source_conditions(kwargs)
-
-        source_content_md5 = kwargs.pop('source_content_md5', None)
-        cpk = kwargs.pop('cpk', None)
-        cpk_info = None
-        if cpk:
-            if self.scheme.lower() != 'https':
-                raise ValueError("Customer provided encryption key must be used over HTTPS.")
-            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
-                               encryption_algorithm=cpk.algorithm)
-
-        options = {
-            'source_url': source_url,
-            'content_length': 0,
-            'source_range': source_range,
-            'range': destination_range,
-            'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
-            'timeout': kwargs.pop('timeout', None),
-            'lease_access_conditions': access_conditions,
-            'sequence_number_access_conditions': seq_conditions,
-            'modified_access_conditions': mod_conditions,
-            'source_modified_access_conditions': source_mod_conditions,
-            'cpk_info': cpk_info,
-            'cls': return_response_headers}
-        options.update(kwargs)
-        return options
-
-    @distributed_trace
-    def upload_pages_from_url(self, source_url,  # type: str
-                              offset,  # type: int
-                              length,  # type: int
-                              source_offset,  # type: int
-                              **kwargs
-                              ):
-        # type: (...) -> Dict[str, Any]
-        """
-        The Upload Pages operation writes a range of pages to a page blob where
-        the contents are read from a URL.
-
-        :param str source_url:
-            The URL of the source data. It can point to any Azure Blob or File, that is either public or has a
-            shared access signature attached.
-        :param int offset:
-            Start of byte range to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param int length:
-            Number of bytes to use for writing to a section of the blob.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a modulus of 512 and the length must be a modulus of
-            512.
-        :param int source_offset:
-            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
-            The service will read the same number of bytes as the destination range (that is, length bytes).
-        :keyword bytes source_content_md5:
-            If given, the service will calculate the MD5 hash of the block content and compare against this value.
- :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- """ - - options = self._upload_pages_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _clear_page_options(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def clear_page(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return self._client.page_blob.clear_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _append_block_options( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if length == 0: - return {} - if isinstance(data, bytes): - data = data[:length] - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - validate_content = kwargs.pop('validate_content', False) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': data, - 'content_length': length, - 'timeout': kwargs.pop('timeout', None), - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: 
(...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
- :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return self._client.append_blob.append_block(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _append_block_from_url_options( # type: ignore - self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # If end range is provided, start range must be provided - if source_length is not None and source_offset is None: - raise ValueError("source_offset should also be specified if source_length is specified") - # Format based on whether length is present - source_range = None - if source_length is not None: - end_range = source_offset + source_length - 1 - source_range = 'bytes={0}-{1}'.format(source_offset, end_range) - elif source_offset is not None: - source_range = "bytes={0}-".format(source_offset) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - source_content_md5 = kwargs.pop('source_content_md5', None) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': copy_source_url, - 'content_length': 0, - 'source_range': source_range, - 'source_content_md5': source_content_md5, - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. 
The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- """ - options = self._append_block_from_url_options( - copy_source_url, - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return self._client.append_blob.append_block_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2019_02_02/_blob_service_client.py deleted file mode 100644 index afb21b0..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_blob_service_client.py +++ /dev/null @@ -1,603 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._shared.models import LocationMode -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.parser import _to_utc_datetime -from ._shared.response_handlers import return_response_headers, process_storage_error, \ - parse_to_internal_user_delegation_key -from ._generated import AzureBlobStorage -from ._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo -from ._container_client import ContainerClient -from ._blob_client import BlobClient -from ._models import ( - ContainerPropertiesPaged, - service_stats_deserialize, - service_properties_deserialize -) - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.pipeline.transport import HttpTransport - from azure.core.pipeline.policies import HTTPPolicy - from ._shared.models import UserDelegationKey - from ._lease import BlobLeaseClient - from ._models import ( - BlobProperties, - ContainerProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(StorageAccountHostsMixin): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. 
- :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self._query_str, credential = self._format_query_string(sas_token, credential) - super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobServiceClient - """Create BlobServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob service client. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string] - :end-before: [END auth_from_connection_string] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 8 - :caption: Getting account information for the blob service. - """ - try: - return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_service_stats(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. 
The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 8 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 8 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. 
- :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 8 - :caption: Setting service properties for the blob service. - """ - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 12 - :caption: Listing the containers in the blob service. - """ - include = 'metadata' if include_metadata else None - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) 
-> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 12 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace - def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 12 - :caption: Deleting a container in the blob service. 
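# A minimal sketch of the create_container / delete_container calls documented
# above, assuming the public azure-storage-blob package (the vendored
# azure.multiapi.storagev2.blob.<api_version> modules expose the same names).
# The connection string and container name are placeholders.
from azure.core.exceptions import ResourceExistsError
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")
try:
    container = service.create_container("samples", metadata={"Category": "test"})
except ResourceExistsError:
    # The container already exists; get a client to the existing one instead.
    container = service.get_container_client("samples")
# ... use the container, then mark it for deletion ...
service.delete_container("samples")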
- """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 8 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 12 - :caption: Getting the blob client to interact with a specific blob. 
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_container_client.py b/azure/multiapi/storagev2/blob/v2019_02_02/_container_client.py deleted file mode 100644 index 7d7f889..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_container_client.py +++ /dev/null @@ -1,1212 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import HttpRequest - -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from ._generated import AzureBlobStorage -from ._generated.models import ( - StorageErrorException, - SignedIdentifier) -from ._deserialize import deserialize_container_properties -from ._serialize import get_modify_conditions -from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobPropertiesPaged, - BlobType, - BlobPrefix) -from ._lease import BlobLeaseClient, get_access_conditions -from ._blob_client import BlobClient - -if TYPE_CHECKING: - from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports - from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports - from datetime import datetime - from ._models import ( # pylint: disable=unused-import - PublicAccess, - AccessPolicy, - ContentSettings, - StandardBlobTier, - PremiumPageBlobTier) - - -def _get_blob_name(blob): - """Return the blob name. - - :param blob: A blob string or BlobProperties - :rtype: str - """ - try: - return blob.name - except AttributeError: - return blob - - -class ContainerClient(StorageAccountHostsMixin): - """A client to interact with a specific container, although that container - may not yet exist. 
- - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 8 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not container_name: - raise ValueError("Please specify a container name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self.container_name = container_name - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - self._query_str) - - @classmethod - def from_container_url(cls, container_url, credential=None, **kwargs): - # type: (str, Optional[Any], Any) -> ContainerClient - """Create ContainerClient from a container url. - - :param str container_url: - The full endpoint URL to the Container, including SAS token if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type container_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - """ - try: - if not container_url.lower().startswith('http'): - container_url = "https://" + container_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(container_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(container_url)) - - container_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(container_path) > 1: - account_path = "/" + "/".join(container_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name = unquote(container_path[-1]) - if not container_name: - raise ValueError("Invalid URL. Please provide a URL with a valid container name") - return cls(account_url, container_name=container_name, credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ContainerClient - """Create ContainerClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: - The container name for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. 
The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_container] - :end-before: [END auth_from_connection_string_container] - :language: python - :dedent: 8 - :caption: Creating the ContainerClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, credential=credential, **kwargs) - - @distributed_trace - def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 12 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - timeout = kwargs.pop('timeout', None) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 12 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the container. 
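# A hedged sketch of acquire_lease followed by a lease-guarded delete, matching
# the docstrings above. The 15-second duration, connection string, and container
# name are illustrative values.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", "samples")
lease = container.acquire_lease(lease_duration=15)
# With an active lease, the delete must present the matching lease ID.
container.delete_container(lease=lease)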
- """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_container_properties(self, **kwargs): - # type: (Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace - def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the container. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 12 - :caption: Getting the access policy on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 12 - :caption: Setting access policy on the container. - """ - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - lease = kwargs.pop('lease', None) - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Any], **Any) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 8 - :caption: List the blobs in the container. 
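# A minimal sketch of list_blobs / walk_blobs as documented above; the prefix,
# delimiter, and container name are placeholders.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", "samples")
# Flat listing, lazily following continuation tokens.
for blob in container.list_blobs(name_starts_with="logs/"):
    print(blob.name, blob.size)
# Hierarchical listing: names are grouped by the "/" delimiter.
for item in container.walk_blobs(delimiter="/"):
    print(item.name)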
- """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. - :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace - def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. 
The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). 
- :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 8 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace - def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - blob.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - def _generate_delete_blobs_options( - self, snapshot=None, - delete_snapshots=None, - request_id=None, - lease_access_conditions=None, - modified_access_conditions=None, - **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. - """ - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct parameters - timeout = kwargs.pop('timeout', None) - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access - "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._client._serialize.header( # pylint: disable=protected-access - "request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access - "lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_none_match", if_none_match, 'str') - - return query_parameters, header_parameters - - @distributed_trace - def delete_blobs(self, *blobs, **kwargs): - # type: (...) -> Iterator[HttpResponse] - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. 
- Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - :type blobs: str or ~azure.storage.blob.BlobProperties - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword lease: - Required if a blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 8 - :caption: Deleting multiple blobs. 
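# A hedged sketch of the delete_blobs batch call described above. The blob names
# are placeholders, and it is assumed the credential in use is accepted by the
# blob batch endpoint (for example, the account shared key).
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", "samples")
responses = container.delete_blobs("a.txt", "b.txt", raise_on_any_failure=False)
# One sub-response per blob, returned in the order the blobs were supplied.
for response in responses:
    print(response.status_code)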
- """ - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - **kwargs - ) - options.update({'raise_on_any_failure': raise_on_any_failure}) - query_parameters, header_parameters = self._generate_delete_blobs_options(**options) - # To pass kwargs to "_batch_send", we need to remove anything that was - # in the Autorest signature for Autorest, otherwise transport will be upset - for possible_param in ['timeout', 'delete_snapshots', 'lease_access_conditions', 'modified_access_conditions']: - options.pop(possible_param, None) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - req = HttpRequest( - "DELETE", - "/{}/{}".format(self.container_name, blob_name), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return self._batch_send(*reqs, **options) - - def _generate_set_tier_options( - self, tier, rehydrate_priority=None, request_id=None, lease_access_conditions=None, **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. - """ - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - timeout = kwargs.pop('timeout', None) - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access - "rehydrate_priority", rehydrate_priority, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._client._serialize.header( # pylint: disable=protected-access - "request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - @distributed_trace - def set_standard_blob_tier_blobs( - self, - standard_blob_tier, # type: Union[str, StandardBlobTier] - *blobs, # type: Union[str, BlobProperties] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: The blobs with which to interact. 
This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - :type blobs: str or ~azure.storage.blob.BlobProperties - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - - query_parameters, header_parameters = self._generate_set_tier_options( - tier=standard_blob_tier, - lease_access_conditions=access_conditions, - **kwargs - ) - # To pass kwargs to "_batch_send", we need to remove anything that was - # in the Autorest signature for Autorest, otherwise transport will be upset - for possible_param in ['timeout', 'lease']: - kwargs.pop(possible_param, None) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - req = HttpRequest( - "PUT", - "/{}/{}".format(self.container_name, blob_name), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return self._batch_send(*reqs, **kwargs) - - @distributed_trace - def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier, # type: Union[str, PremiumPageBlobTier] - *blobs, # type: Union[str, BlobProperties] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - :type blobs: str or ~azure.storage.blob.BlobProperties - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. 
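Both tier-setting batch operations above share this calling pattern; a short hedged sketch of the standard-tier variant follows (upstream ``azure.storage.blob`` API, illustrative names).

.. code-block:: python

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string(
        "<connection-string>", "my-container")

    # Move two block blobs to the Archive tier in a single batch request.
    responses = container.set_standard_blob_tier_blobs("Archive", "a.txt", "b.txt")
    for response in responses:
        print(response.status_code)  # 200, or 202 if the transition is not immediate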
- :return: An iterator of responses, one for each blob in order - :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - - query_parameters, header_parameters = self._generate_set_tier_options( - tier=premium_page_blob_tier, - lease_access_conditions=access_conditions, - **kwargs - ) - # To pass kwargs to "_batch_send", we need to remove anything that was - # in the Autorest signature for Autorest, otherwise transport will be upset - for possible_param in ['timeout', 'lease']: - kwargs.pop(possible_param, None) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - req = HttpRequest( - "PUT", - "/{}/{}".format(self.container_name, blob_name), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return self._batch_send(*reqs, **kwargs) - - def get_blob_client( - self, blob, # type: Union[str, BlobProperties] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 8 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_deserialize.py b/azure/multiapi/storagev2/blob/v2019_02_02/_deserialize.py deleted file mode 100644 index 17d7fe7..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_deserialize.py +++ /dev/null @@ -1,57 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._shared.response_handlers import deserialize_metadata -from ._models import BlobProperties, ContainerProperties - -if TYPE_CHECKING: - from ._generated.models import PageList - - -def deserialize_blob_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - blob_properties = BlobProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - blob_properties.content_settings.content_md5 = None - return blob_properties - - -def deserialize_blob_stream(response, obj, headers): - blob_properties = deserialize_blob_properties(response, obj, headers) - obj.properties = blob_properties - return response.location_mode, obj - - -def deserialize_container_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - container_properties = ContainerProperties( - metadata=metadata, - **headers - ) - return container_properties - - -def get_page_ranges_result(ranges): - # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - page_range = [] # type: ignore - clear_range = [] # type: List - if ranges.page_range: - page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore - if ranges.clear_range: - clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range] - return page_range, clear_range # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_download.py b/azure/multiapi/storagev2/blob/v2019_02_02/_download.py deleted file mode 100644 index 478ed3c..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_download.py +++ /dev/null @@ -1,579 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range -from ._deserialize import get_page_ranges_result - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. 
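# Illustrative worked example of the 16-byte alignment above (not in the original file):
#   for an encrypted download of bytes 20-100,
#     start_offset = 20 % 16 = 4   -> start_range becomes 16
#     the extra IV block then gives start_offset = 20, start_range = 0
#     end_offset  = 15 - (100 % 16) = 11 -> end_range becomes 111
#   so bytes 0-111 are fetched and decrypt_blob trims 20 bytes from the front
#   and 11 from the back to return exactly the requested range.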
- if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - non_empty_ranges=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - self.non_empty_ranges = non_empty_ranges - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - 
def _do_optimize(self, given_range_start, given_range_end): - # If we have no page range list stored, then assume there's data everywhere for that page blob - # or it's a block blob or append blob - if self.non_empty_ranges is None: - return False - - for source_range in self.non_empty_ranges: - # Case 1: As the range list is sorted, if we've reached such a source_range - # we've checked all the appropriate source_range already and haven't found any overlapping. - # so the given range doesn't have any data and download optimization could be applied. - # given range: | | - # source range: | | - if given_range_end < source_range['start']: # pylint:disable=no-else-return - return True - # Case 2: the given range comes after source_range, continue checking. - # given range: | | - # source range: | | - elif source_range['end'] < given_range_start: - pass - # Case 3: source_range and given range overlap somehow, no need to optimize. - else: - return False - # Went through all src_ranges, but nothing overlapped. Optimization will be applied. - return True - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. - if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get("modified_access_conditions"): - self.request_options["modified_access_conditions"].if_match = response.properties.etag - - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - chunk = next(self._iter_chunks) - self._current_content = self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - next = __next__ # Python 2 compatibility. - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. 
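This class is what ``BlobClient.download_blob()`` hands back; a hedged usage sketch follows (connection string and names are assumptions) that streams the blob chunk by chunk instead of buffering it all at once.

.. code-block:: python

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", "my-container", "a.txt")

    downloader = blob.download_blob()     # a StorageStreamDownloader
    total = 0
    for chunk in downloader.chunks():     # bytes objects, fetched range by range
        total += len(chunk)
    assert total == downloader.size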
- - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the blob. - """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check 
the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - # according to the REST API documentation: - # in a highly fragmented page blob with a large number of writes, - # a Get Page Ranges request can fail due to an internal server timeout. - # thus, if the page blob is not sparse, it's ok for it to fail - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overriden by the user by specifying '*' - if self._request_options.get("modified_access_conditions"): - if not self._request_options["modified_access_conditions"].if_match: - self._request_options["modified_access_conditions"].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. 
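Continuing with the ``blob`` client from the previous sketch, ``readall`` is the simplest path when the whole payload fits in memory:

.. code-block:: python

    data = blob.download_blob().readall()  # blocks until every byte has arrived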
- :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. 
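For larger blobs the same downloader can write straight into a seekable handle using several parallel connections; a short sketch, with the file name and concurrency level chosen arbitrarily:

.. code-block:: python

    with open("local-copy.bin", "wb") as handle:
        # max_concurrency > 1 requires a seekable target stream (see readinto above)
        bytes_read = blob.download_blob(max_concurrency=4).readinto(handle)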
- :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/_azure_blob_storage.py deleted file mode 100644 index 957dda1..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/_azure_blob_storage.py +++ /dev/null @@ -1,81 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import PipelineClient -from msrest import Serializer, Deserializer - -from ._configuration import AzureBlobStorageConfiguration -from azure.core.exceptions import map_error -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from . import models - - -class AzureBlobStorage(object): - """AzureBlobStorage - - - :ivar service: Service operations - :vartype service: azure.storage.blob.operations.ServiceOperations - :ivar container: Container operations - :vartype container: azure.storage.blob.operations.ContainerOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.blob.operations.DirectoryOperations - :ivar blob: Blob operations - :vartype blob: azure.storage.blob.operations.BlobOperations - :ivar page_blob: PageBlob operations - :vartype page_blob: azure.storage.blob.operations.PageBlobOperations - :ivar append_blob: AppendBlob operations - :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations - :ivar block_blob: BlockBlob operations - :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. 
- :type url: str - """ - - def __init__(self, url, **kwargs): - - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-02-02' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - def __enter__(self): - self._client.__enter__() - return self - def __exit__(self, *exc_details): - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/_configuration.py deleted file mode 100644 index 54cb990..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/_configuration.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from .version import VERSION - - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :ivar version: Specifies the version of the operation to use for this - request. 
- :type version: str - """ - - def __init__(self, url, **kwargs): - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION)) - self.generate_client_request_id = True - - self.url = url - self.version = "2019-02-02" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/__init__.py deleted file mode 100644 index 009c965..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage_async import AzureBlobStorage -__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/_azure_blob_storage_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/_azure_blob_storage_async.py deleted file mode 100644 index a1ad1db..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/_azure_blob_storage_async.py +++ /dev/null @@ -1,82 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import AsyncPipelineClient -from msrest import Serializer, Deserializer - -from ._configuration_async import AzureBlobStorageConfiguration -from azure.core.exceptions import map_error -from .operations_async import ServiceOperations -from .operations_async import ContainerOperations -from .operations_async import DirectoryOperations -from .operations_async import BlobOperations -from .operations_async import PageBlobOperations -from .operations_async import AppendBlobOperations -from .operations_async import BlockBlobOperations -from .. 
import models - - -class AzureBlobStorage(object): - """AzureBlobStorage - - - :ivar service: Service operations - :vartype service: azure.storage.blob.aio.operations_async.ServiceOperations - :ivar container: Container operations - :vartype container: azure.storage.blob.aio.operations_async.ContainerOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.blob.aio.operations_async.DirectoryOperations - :ivar blob: Blob operations - :vartype blob: azure.storage.blob.aio.operations_async.BlobOperations - :ivar page_blob: PageBlob operations - :vartype page_blob: azure.storage.blob.aio.operations_async.PageBlobOperations - :ivar append_blob: AppendBlob operations - :vartype append_blob: azure.storage.blob.aio.operations_async.AppendBlobOperations - :ivar block_blob: BlockBlob operations - :vartype block_blob: azure.storage.blob.aio.operations_async.BlockBlobOperations - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - """ - - def __init__( - self, url, **kwargs): - - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-02-02' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def __aenter__(self): - await self._client.__aenter__() - return self - async def __aexit__(self, *exc_details): - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/_configuration_async.py deleted file mode 100644 index 8f9f8b4..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/_configuration_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from ..version import VERSION - - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage - Note that all parameters used to create this instance are saved as instance - attributes. 
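The async generated client above sits underneath the public ``azure.storage.blob.aio`` clients; a hedged sketch of the usual calling pattern (illustrative names), using the client as an async context manager so the transport is closed cleanly:

.. code-block:: python

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", "my-container", "a.txt") as blob:
            downloader = await blob.download_blob()
            data = await downloader.readall()

    asyncio.run(main())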
- - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :ivar version: Specifies the version of the operation to use for this - request. - :type version: str - """ - - def __init__(self, url, **kwargs): - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION)) - self.generate_client_request_id = True - self.accept_language = None - - self.url = url - self.version = "2019-02-02" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/__init__.py deleted file mode 100644 index dec0519..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations_async import ServiceOperations -from ._container_operations_async import ContainerOperations -from ._directory_operations_async import DirectoryOperations -from ._blob_operations_async import BlobOperations -from ._page_blob_operations_async import PageBlobOperations -from ._append_blob_operations_async import AppendBlobOperations -from ._block_blob_operations_async import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_append_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_append_blob_operations_async.py deleted file mode 100644 index 40b494b..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_append_blob_operations_async.py +++ /dev/null @@ -1,539 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class AppendBlobOperations: - """AppendBlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". - :ivar comp: . Constant value: "appendblock". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "AppendBlob" - self.comp = "appendblock" - - async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-blob-type'] = 
self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': 
self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - async def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append - Block is supported only on version 2015-02-21 version or later. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - 
if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.append_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block.metadata = {'url': '/{containerName}/{blob}'} - - async def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob where the contents are read from a source url. The - Append Block operation is permitted only if the blob was created with - x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.append_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') - - # Construct headers - header_parameters = {} - 
header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_blob_operations_async.py deleted file mode 100644 index bb66207..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_blob_operations_async.py +++ /dev/null @@ -1,2418 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class BlobOperations: - """BlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_requires_sync: . Constant value: "true". - :ivar x_ms_copy_action: . Constant value: "abort". - :ivar restype: . Constant value: "account". 
- """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_requires_sync = "true" - self.x_ms_copy_action = "abort" - self.restype = "account" - - async def download(self, snapshot=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Download operation reads or downloads a blob from the system, - including its metadata and properties. You can also call Download to - read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param range_get_content_md5: When set to true and specified together - with the Range, the service returns the MD5 hash for the range, as - long as the range is less than or equal to 4 MB in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified - together with the Range, the service returns the CRC64 hash for the - range, as long as the range is less than or equal to 4 MB in size. - :type range_get_content_crc64: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = 
self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', 
response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} - - async def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Properties operation returns all user-defined metadata, - standard HTTP properties, and system properties for the blob. It does - not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, 
**path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')), - 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 
'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')), - 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), - 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), - 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}/{blob}'} - - async def delete(self, snapshot=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """If the storage account's soft delete feature is disabled then, when a - blob is deleted, it is permanently removed from the storage account. If - the storage account's soft delete feature is enabled, then, when a blob - is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for - the number of days specified by the DeleteRetentionPolicy section of - [Storage service properties] (Set-Blob-Service-Properties.md). After - the specified number of days has passed, the blob's data is permanently - removed from the storage account. Note that you continue to be charged - for the soft-deleted blob's storage until it is permanently removed. - Use the List Blobs API and specify the "include=deleted" query - parameter to discover which blobs and snapshots have been soft deleted. - You can then use the Undelete Blob API to restore a soft-deleted blob. 
- All other operations on a soft-deleted blob or snapshot cause the - service to return an HTTP status code of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param delete_snapshots: Required if the blob has associated - snapshots. Specify one of the following two options: include: Delete - the base blob and all of its snapshots. only: Delete only the blob's - snapshots and not the blob itself. Possible values include: 'include', - 'only' - :type delete_snapshots: str or - ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}/{blob}'} - - async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Rename a blob/file. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesystem}/{path}". 
If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = 
self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - async def undelete(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.undelete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - undelete.metadata = {'url': '/{containerName}/{blob}'} - - async def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
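# --- Editor's illustrative sketch (not part of the generated client) ---
# The `undelete` coroutine defined just above takes only optional `timeout`
# and `request_id` arguments. A minimal call might look like this, assuming
# `ops` is an authenticated instance of this async operations class (an
# assumption, as is using uuid for the client request id).
import uuid

async def run_undelete(ops):
    # Restore a soft-deleted blob; the request id is echoed back in the
    # x-ms-client-request-id response header for correlation.
    await ops.undelete(timeout=30, request_id=str(uuid.uuid4()))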
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = 
self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} - - async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set Blob Metadata operation sets user-defined metadata for the - specified blob as one or more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. 
See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - 
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}/{blob}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
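# --- Editor's illustrative sketch (not part of the generated client) ---
# Acquiring a lease only needs a duration (15-60 seconds, or -1 for an
# infinite lease) and, optionally, a proposed lease id in GUID form, as the
# docstring above explains. `ops` is a hypothetical, already-authenticated
# instance of this async operations class.
import uuid

async def run_acquire_lease(ops):
    proposed = str(uuid.uuid4())
    # The granted lease id comes back in the x-ms-lease-id response header;
    # with the default `cls`, the coroutine itself returns None on success.
    await ops.acquire_lease(duration=15, proposed_lease_id=proposed, timeout=30)
    return proposed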
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - 
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
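# --- Editor's illustrative sketch (not part of the generated client) ---
# release_lease and renew_lease both take the current lease id as their first
# argument (see the signatures above). The helper below is hypothetical and
# assumes `ops` is an instance of this async operations class.
async def renew_then_release(ops, lease_id):
    # Extend the lease for another period, then give it up explicitly so the
    # blob becomes immediately available to other writers.
    await ops.renew_lease(lease_id, timeout=30)
    await ops.release_lease(lease_id, timeout=30)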
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', 
response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} - - async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Start Copy From URL operation copies a blob or an internet resource - to a new blob. - - :param copy_source: Specifies the name of the source page blob - snapshot. 
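# --- Editor's illustrative sketch (not part of the generated client) ---
# create_snapshot takes the same optional timeout/request_id arguments as the
# other operations, and start_copy_from_url (whose signature appears just
# above) needs a URL-encoded source blob URL as `copy_source`. The URL and
# `ops` below are placeholders, not real endpoints.
async def snapshot_then_copy(ops):
    # Take a read-only snapshot of the current blob ...
    await ops.create_snapshot(timeout=30)
    # ... then start an asynchronous server-side copy from a public or
    # SAS-authenticated source blob URL, setting the destination tier.
    await ops.start_copy_from_url(
        "https://account.blob.core.windows.net/container/source-blob",
        tier="Cool",
        timeout=30,
    )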
This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = 
modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Copy From URL operation copies a blob or an internet resource to a - new blob. It will not return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
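# --- Illustrative sketch, not part of the generated client or this diff ---
# The Start Copy From URL operation above reduces to a single PUT against the
# destination blob URL with the source passed in x-ms-copy-source; the service
# answers 202 and reports progress through the x-ms-copy-id and
# x-ms-copy-status headers. A minimal raw-REST sketch, assuming a placeholder
# account URL and SAS token (both hypothetical):
import requests

ACCOUNT_URL = "https://<account>.blob.core.windows.net"   # placeholder
SAS = "?sv=..."                                            # placeholder SAS token
dest = f"{ACCOUNT_URL}/mycontainer/dest-blob{SAS}"
source = f"{ACCOUNT_URL}/mycontainer/src-blob"

resp = requests.put(
    dest,
    headers={
        "x-ms-version": "2019-02-02",        # service version this generated file targets
        "x-ms-copy-source": source,          # URL-encoded source, up to 2 KB
    },
)
assert resp.status_code == 202                       # copy is accepted and runs asynchronously
copy_id = resp.headers.get("x-ms-copy-id")           # needed later to abort the copy
copy_status = resp.headers.get("x-ms-copy-status")   # typically 'pending' or 'success'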
- :type request_id: str - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = 
self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Abort Copy From URL operation aborts a pending Copy From URL - operation, and leaves a destination blob with zero length and full - metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Set Tier operation sets the tier on a blob. The operation is - allowed on a page blob in a premium storage account and on a block blob - in a blob storage account (locally redundant storage only). A premium - page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. 
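# --- Illustrative sketch, not part of the generated client or this diff ---
# Abort Copy From URL maps to a PUT with comp=copy and copyid=<id> in the
# query string plus an x-ms-copy-action header; the generated code sends a
# class constant, and "abort" is assumed here. The service answers 204 and
# leaves a zero-length destination blob with full metadata:
import requests

dest = "https://<account>.blob.core.windows.net/mycontainer/dest-blob?sv=..."  # placeholder
copy_id = "<copy-id>"                          # value of x-ms-copy-id from the original copy
resp = requests.put(
    dest,
    params={"comp": "copy", "copyid": copy_id},
    headers={
        "x-ms-version": "2019-02-02",
        "x-ms-copy-action": "abort",           # assumed constant value
    },
)
assert resp.status_code == 204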
Possible values - include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', - 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - - # Construct URL - url = self.set_tier.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tier.metadata = {'url': '/{containerName}/{blob}'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . 
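# --- Illustrative sketch, not part of the generated client or this diff ---
# Set Tier is a PUT with comp=tier; the tier travels in x-ms-access-tier and
# an optional x-ms-rehydrate-priority may accompany it when un-archiving.
# The service answers 200 (tier applied) or 202 (rehydration pending), and
# the blob's ETag is not changed. Placeholder URL/SAS as before:
import requests

blob = "https://<account>.blob.core.windows.net/mycontainer/blob1?sv=..."  # placeholder
resp = requests.put(
    blob,
    params={"comp": "tier"},
    headers={
        "x-ms-version": "2019-02-02",
        "x-ms-access-tier": "Cool",            # e.g. Hot / Cool / Archive, or P4..P80 for premium
        # "x-ms-rehydrate-priority": "High",   # only meaningful for archived blobs
    },
)
assert resp.status_code in (200, 202)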
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_block_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_block_blob_operations_async.py deleted file mode 100644 index 95a850b..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_block_blob_operations_async.py +++ /dev/null @@ -1,765 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class BlockBlobOperations: - """BlockBlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
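# --- Illustrative sketch, not part of the generated client or this diff ---
# Get Account Info is a GET with comp=properties plus a restype query value;
# the generated code sends a class constant for restype, and "account" is
# assumed here. The SKU name and account kind come back as response headers:
import requests

blob = "https://<account>.blob.core.windows.net/mycontainer/blob1?sv=..."  # placeholder
resp = requests.get(
    blob,
    params={"restype": "account", "comp": "properties"},  # restype value assumed
    headers={"x-ms-version": "2019-02-02"},
)
resp.raise_for_status()
sku_name = resp.headers.get("x-ms-sku-name")          # e.g. Standard_LRS
account_kind = resp.headers.get("x-ms-account-kind")  # e.g. StorageV2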
- :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "BlockBlob" - - async def upload(self, body, content_length, timeout=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Block Blob operation updates the content of an existing - block blob. Updating an existing block blob overwrites any existing - metadata on the blob. Partial updates are not supported with Put Blob; - the content of the existing blob is overwritten with the content of the - new blob. To perform a partial update of the content of a block blob, - use the Put Block List operation. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
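# --- Illustrative sketch, not part of the generated client or this diff ---
# Upload (Put Blob) writes the entire block blob in one request: the payload
# is the request body, Content-Length must match it, and x-ms-blob-type:
# BlockBlob selects the blob type (the constant defined on the class above).
# A successful write returns 201. Placeholder URL/SAS:
import requests

blob = "https://<account>.blob.core.windows.net/mycontainer/hello.txt?sv=..."  # placeholder
data = b"hello, world"
resp = requests.put(
    blob,
    data=data,
    headers={
        "x-ms-version": "2019-02-02",
        "x-ms-blob-type": "BlockBlob",
        "Content-Type": "application/octet-stream",
        "Content-Length": str(len(data)),
    },
)
assert resp.status_code == 201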
- :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.upload.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id 
is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', 
response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload.metadata = {'url': '/{containerName}/{blob}'} - - async def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, *, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data - :type body: Generator - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - - comp = "block" - - # Construct URL - url = self.stage_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if 
transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block.metadata = {'url': '/{containerName}/{blob}'} - - async def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, lease_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob where the contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. 
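# --- Illustrative sketch, not part of the generated client or this diff ---
# Stage Block (Put Block) uploads one block of a blob without committing it:
# comp=block plus a Base64 block id (at most 64 bytes before encoding, and the
# same length for every block of a given blob) in the blockid query parameter.
# The service answers 201. Placeholder URL/SAS:
import base64
import requests

blob = "https://<account>.blob.core.windows.net/mycontainer/big.bin?sv=..."  # placeholder
block_id = base64.b64encode(b"block-000001").decode()   # fixed-length id, Base64-encoded
chunk = b"\x00" * 1024
resp = requests.put(
    blob,
    params={"comp": "block", "blockid": block_id},
    data=chunk,
    headers={
        "x-ms-version": "2019-02-02",
        "Content-Length": str(len(chunk)),
    },
)
assert resp.status_code == 201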
- :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "block" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = 
self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, 
blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. In order to be written as part of a - blob, a block must have been successfully written to the server in a - prior Put Block operation. You can call Put Block List to update a blob - by uploading only those blocks that have changed, then committing the - new and existing blocks together. You can do this by specifying whether - to commit a block from the committed block list or from the uncommitted - block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
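# --- Illustrative sketch, not part of the generated client or this diff ---
# Commit Block List (comp=blocklist) turns previously staged blocks into the
# blob. The generated operation serializes a BlockLookupList model; the raw
# request body is an XML block list whose elements pick blocks from the
# committed list, the uncommitted list, or the most recently uploaded version.
# The <BlockList>/<Latest> element names below are assumed from the REST API.
# Blocks can also be staged straight from a URL (Stage Block From URL, via
# x-ms-copy-source and x-ms-source-range) before committing. Placeholder URL/SAS:
import base64
import requests

blob = "https://<account>.blob.core.windows.net/mycontainer/big.bin?sv=..."  # placeholder
block_id = base64.b64encode(b"block-000001").decode()
body = (
    '<?xml version="1.0" encoding="utf-8"?>'
    f"<BlockList><Latest>{block_id}</Latest></BlockList>"
)
resp = requests.put(
    blob,
    params={"comp": "blocklist"},
    data=body.encode("utf-8"),
    headers={
        "x-ms-version": "2019-02-02",
        "Content-Type": "application/xml; charset=utf-8",   # matches the generated request
    },
)
assert resp.status_code == 201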
- :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "blocklist" - - # Construct URL - url = self.commit_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct body - body_content = self._serialize.body(blocks, 'BlockLookupList') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', 
response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} - - async def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param list_type: Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
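# --- Illustrative sketch, not part of the generated client or this diff ---
# Get Block List is a GET with comp=blocklist; blocklisttype selects the
# committed blocks, the uncommitted blocks, or both, and the response body is
# an XML block-list document (deserialized into models.BlockList by the
# generated code). Placeholder URL/SAS:
import requests

blob = "https://<account>.blob.core.windows.net/mycontainer/big.bin?sv=..."  # placeholder
resp = requests.get(
    blob,
    params={"comp": "blocklist", "blocklisttype": "all"},  # 'committed' | 'uncommitted' | 'all'
    headers={"x-ms-version": "2019-02-02", "Accept": "application/xml"},
)
resp.raise_for_status()
print(resp.headers.get("x-ms-blob-content-length"))  # committed size of the blob
print(resp.text)                                     # raw block-list XML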
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlockList or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "blocklist" - - # Construct URL - url = self.get_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlockList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_container_operations_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_container_operations_async.py deleted file mode 100644 index 4e3e6a0..0000000 --- 
a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_container_operations_async.py +++ /dev/null @@ -1,1310 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ContainerOperations: - """ContainerOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - async def create(self, timeout=None, metadata=None, access=None, request_id=None, *, cls=None, **kwargs): - """creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}'} - - async def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """returns all user-defined metadata and system properties for the - specified container. The data returned does not include the container's - list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')), - 'x-ms-has-legal-hold': self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}'} - - async def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """operation marks the specified container for 
deletion. The container and - any blobs contained within it are later deleted during garbage - collection. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', 
response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}'} - - async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """operation sets one or more user-defined name-value pairs for the - specified container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - - restype = "container" - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - - # Construct and send request - request = 
self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}'} - - async def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """gets the permissions for the specified container. The permissions - indicate whether container data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} - - async def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """sets the permissions for the specified container. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param container_acl: the acls for the container - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{containerName}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}'} - - async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}'} - - async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. 
The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}'} - - async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}'} - - async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}'} - - async def list_blob_flat_segment(self, 
prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsFlatSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} - - async def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. 
- - :param delimiter: When the request includes this parameter, the - operation returns a BlobPrefix element in the response body that acts - as a placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character. The - delimiter may be a single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsHierarchySegmentResponse or the result of - cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . 
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/{containerName}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_directory_operations_async.py deleted file mode 100644 index 144bdff..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_directory_operations_async.py +++ /dev/null @@ -1,740 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar resource: . Constant value: "directory". 
- """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "directory" - - async def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Create a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - - # Construct headers - header_parameters = {} - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: 
- header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - async def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Rename a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. 
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit is set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must - match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled.
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct 
headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 
'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - async def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the - directory will be deleted. If "false" and the directory is non-empty, - an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When deleting a directory, the number of paths that are - deleted with each invocation is limited. If the number of paths to be - deleted exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - delete operation to continue deleting the directory. - :type marker: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled.
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_page_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_page_blob_operations_async.py deleted file mode 100644 index 2563c77..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/aio/operations_async/_page_blob_operations_async.py +++ /dev/null @@ -1,1302 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class PageBlobOperations: - """PageBlobOperations async operations. 
- - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "PageBlob" - - async def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80' - :type tier: str or - ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - 
header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - async def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if 
range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 
'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages.metadata = {'url': '/{containerName}/{blob}'} - - async def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = 
modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "page" - page_write = "clear" - - # Construct URL - url = self.clear_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 
'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - clear_pages.metadata = {'url': '/{containerName}/{blob}'} - - async def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The - length of this range should match the ContentLength header and - x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be - written. The range should be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
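For orientation, the Upload Pages and Clear Pages operations deleted above are normally reached through the public ``azure-storage-blob`` client rather than this generated layer. A minimal sketch; the connection string, container and blob names are placeholders::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")

    # Page blobs are created with a fixed size and written in 512-byte pages.
    blob.create_page_blob(size=1024)
    blob.upload_page(b"\x01" * 512, offset=0, length=512)  # PUT ?comp=page, x-ms-page-write: update
    blob.clear_page(offset=0, length=512)                  # PUT ?comp=page, x-ms-page-write: clear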
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = 
self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", 
source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Page Ranges operation returns the list of valid page ranges for - a page blob or snapshot of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
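The Upload Pages From URL operation above has a corresponding convenience method on the public client. A rough sketch, assuming the source URL is readable (public or carrying a SAS) and both ranges are 512-byte aligned; account, container and blob names are placeholders::

    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="copy.vhd")
    dest.create_page_blob(size=4096)

    # Reads bytes 0-4095 of the source and writes them to the same offsets here.
    dest.upload_pages_from_url(
        source_url="https://<account>.blob.core.windows.net/pages/disk.vhd?<sas>",
        offset=0, length=4096, source_offset=0)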
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = 
self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} - - async def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Page Ranges Diff operation returns the list of valid page - ranges for a page blob that were changed between target blob and - previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The - prevsnapshot parameter is a DateTime value that specifies that the - response will contain only pages that were changed between target blob - and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot - specified by prevsnapshot is the older of the two. Note that - incremental snapshots are currently supported only for blobs created - on or after January 1, 2016. - :type prevsnapshot: str - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
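Listing valid page ranges, as exposed through the public client (names are placeholders); ``get_page_ranges`` returns the written ranges and the cleared ranges as two lists of ``{'start': ..., 'end': ...}`` dicts::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")

    page_ranges, clear_ranges = blob.get_page_ranges()
    for r in page_ranges:
        print("valid bytes", r["start"], "-", r["end"])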
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} - - async def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
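The diff variant is driven by passing a baseline snapshot; a sketch assuming the value returned by ``create_snapshot`` is accepted for ``previous_snapshot_diff`` and using placeholder names::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")

    baseline = blob.create_snapshot()
    blob.upload_page(b"\xff" * 512, offset=0, length=512)

    # Pages updated or cleared since the baseline snapshot.
    changed, cleared = blob.get_page_ranges(previous_snapshot_diff=baseline)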
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "properties" - - # Construct URL - url = self.resize.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - 
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - resize.metadata = {'url': '/{containerName}/{blob}'} - - async def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the - x-ms-blob-sequence-number header is set for the request. This property - applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. Possible values include: - 'max', 'update', 'increment' - :type sequence_number_action: str or - ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
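Resizing maps to a small helper on the public client; the new size must be a multiple of 512 bytes, and shrinking discards data past the new end (names are placeholders)::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")

    blob.resize_blob(size=2 * 1024 * 1024)  # PUT ?comp=properties with x-ms-blob-content-length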
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "properties" - - # Construct URL - url = self.update_sequence_number.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', 
response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} - - async def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Copy Incremental operation copies a snapshot of the source page - blob to a destination page blob. The snapshot is copied such that only - the differential changes between the previously copied snapshot are - transferred to the destination. The copied snapshots are complete - copies of the original snapshot and can be read or copied from as - usual. This API is supported since REST version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
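The sequence-number actions ("max", "update", "increment") surface as ``set_sequence_number`` on the public client; a sketch with placeholder names, passing the action as a plain string::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")

    blob.set_sequence_number("update", sequence_number=7)  # x-ms-sequence-number-action: update
    blob.set_sequence_number("increment")                  # service bumps the current value by one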
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "incrementalcopy" - - # Construct URL - url = self.copy_incremental.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, 
response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/__init__.py deleted file mode 100644 index 0fa6ad3..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/__init__.py +++ /dev/null @@ -1,186 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import AppendPositionAccessConditions - from ._models_py3 import BlobFlatListSegment - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobHTTPHeaders - from ._models_py3 import BlobItem - from ._models_py3 import BlobMetadata - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobProperties - from ._models_py3 import Block - from ._models_py3 import BlockList - from ._models_py3 import BlockLookupList - from ._models_py3 import ClearRange - from ._models_py3 import ContainerItem - from ._models_py3 import ContainerProperties - from ._models_py3 import CorsRule - from ._models_py3 import CpkInfo - from ._models_py3 import DataLakeStorageError, DataLakeStorageErrorException - from ._models_py3 import DataLakeStorageErrorError - from ._models_py3 import DirectoryHttpHeaders - from ._models_py3 import GeoReplication - from ._models_py3 import KeyInfo - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsFlatSegmentResponse - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ListContainersSegmentResponse - from ._models_py3 import Logging - from ._models_py3 import Metrics - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import PageList - from ._models_py3 import PageRange - from ._models_py3 import RetentionPolicy - from ._models_py3 import SequenceNumberAccessConditions - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StaticWebsite - from ._models_py3 import StorageError, StorageErrorException - from ._models_py3 import StorageServiceProperties - from ._models_py3 import StorageServiceStats - from ._models_py3 import UserDelegationKey -except (SyntaxError, ImportError): - from ._models import AccessPolicy - from ._models import AppendPositionAccessConditions - from ._models import BlobFlatListSegment - from ._models import BlobHierarchyListSegment - from ._models import BlobHTTPHeaders - from ._models import BlobItem - from ._models import BlobMetadata - from ._models import BlobPrefix - from ._models import BlobProperties - from ._models import Block - from ._models import BlockList - from ._models import BlockLookupList - from ._models import ClearRange - from ._models import ContainerItem - from ._models import ContainerProperties - from ._models import CorsRule - from ._models import 
CpkInfo - from ._models import DataLakeStorageError, DataLakeStorageErrorException - from ._models import DataLakeStorageErrorError - from ._models import DirectoryHttpHeaders - from ._models import GeoReplication - from ._models import KeyInfo - from ._models import LeaseAccessConditions - from ._models import ListBlobsFlatSegmentResponse - from ._models import ListBlobsHierarchySegmentResponse - from ._models import ListContainersSegmentResponse - from ._models import Logging - from ._models import Metrics - from ._models import ModifiedAccessConditions - from ._models import PageList - from ._models import PageRange - from ._models import RetentionPolicy - from ._models import SequenceNumberAccessConditions - from ._models import SignedIdentifier - from ._models import SourceModifiedAccessConditions - from ._models import StaticWebsite - from ._models import StorageError, StorageErrorException - from ._models import StorageServiceProperties - from ._models import StorageServiceStats - from ._models import UserDelegationKey -from ._azure_blob_storage_enums import ( - AccessTier, - AccessTierOptional, - AccessTierRequired, - AccountKind, - ArchiveStatus, - BlobType, - BlockListType, - CopyStatusType, - DeleteSnapshotsOptionType, - EncryptionAlgorithmType, - GeoReplicationStatusType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListBlobsIncludeItem, - ListContainersIncludeType, - PathRenameMode, - PremiumPageBlobAccessTier, - PublicAccessType, - RehydratePriority, - SequenceNumberActionType, - SkuName, - StorageErrorCode, - SyncCopyStatusType, -) - -__all__ = [ - 'AccessPolicy', - 'AppendPositionAccessConditions', - 'BlobFlatListSegment', - 'BlobHierarchyListSegment', - 'BlobHTTPHeaders', - 'BlobItem', - 'BlobMetadata', - 'BlobPrefix', - 'BlobProperties', - 'Block', - 'BlockList', - 'BlockLookupList', - 'ClearRange', - 'ContainerItem', - 'ContainerProperties', - 'CorsRule', - 'CpkInfo', - 'DataLakeStorageError', 'DataLakeStorageErrorException', - 'DataLakeStorageErrorError', - 'DirectoryHttpHeaders', - 'GeoReplication', - 'KeyInfo', - 'LeaseAccessConditions', - 'ListBlobsFlatSegmentResponse', - 'ListBlobsHierarchySegmentResponse', - 'ListContainersSegmentResponse', - 'Logging', - 'Metrics', - 'ModifiedAccessConditions', - 'PageList', - 'PageRange', - 'RetentionPolicy', - 'SequenceNumberAccessConditions', - 'SignedIdentifier', - 'SourceModifiedAccessConditions', - 'StaticWebsite', - 'StorageError', 'StorageErrorException', - 'StorageServiceProperties', - 'StorageServiceStats', - 'UserDelegationKey', - 'PublicAccessType', - 'CopyStatusType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'AccessTier', - 'ArchiveStatus', - 'BlobType', - 'StorageErrorCode', - 'GeoReplicationStatusType', - 'AccessTierRequired', - 'AccessTierOptional', - 'PremiumPageBlobAccessTier', - 'RehydratePriority', - 'BlockListType', - 'DeleteSnapshotsOptionType', - 'EncryptionAlgorithmType', - 'ListBlobsIncludeItem', - 'ListContainersIncludeType', - 'PathRenameMode', - 'SequenceNumberActionType', - 'SkuName', - 'AccountKind', - 'SyncCopyStatusType', -] diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_azure_blob_storage_enums.py deleted file mode 100644 index 966e2e9..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_azure_blob_storage_enums.py +++ /dev/null @@ -1,322 +0,0 @@ -# coding=utf-8 -# 
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum - - -class PublicAccessType(str, Enum): - - container = "container" - blob = "blob" - - -class CopyStatusType(str, Enum): - - pending = "pending" - success = "success" - aborted = "aborted" - failed = "failed" - - -class LeaseDurationType(str, Enum): - - infinite = "infinite" - fixed = "fixed" - - -class LeaseStateType(str, Enum): - - available = "available" - leased = "leased" - expired = "expired" - breaking = "breaking" - broken = "broken" - - -class LeaseStatusType(str, Enum): - - locked = "locked" - unlocked = "unlocked" - - -class AccessTier(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class ArchiveStatus(str, Enum): - - rehydrate_pending_to_hot = "rehydrate-pending-to-hot" - rehydrate_pending_to_cool = "rehydrate-pending-to-cool" - - -class BlobType(str, Enum): - - block_blob = "BlockBlob" - page_blob = "PageBlob" - append_blob = "AppendBlob" - - -class StorageErrorCode(str, Enum): - - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = 
"UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - 
target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch" - authorization_protocol_mismatch = "AuthorizationProtocolMismatch" - authorization_permission_mismatch = "AuthorizationPermissionMismatch" - authorization_service_mismatch = "AuthorizationServiceMismatch" - authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch" - - -class GeoReplicationStatusType(str, Enum): - - live = "live" - bootstrap = "bootstrap" - unavailable = "unavailable" - - -class AccessTierRequired(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class AccessTierOptional(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class PremiumPageBlobAccessTier(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - - -class RehydratePriority(str, Enum): - - high = "High" - standard = "Standard" - - -class BlockListType(str, Enum): - - committed = "committed" - uncommitted = "uncommitted" - all = "all" - - -class DeleteSnapshotsOptionType(str, Enum): - - include = "include" - only = "only" - - -class EncryptionAlgorithmType(str, Enum): - - aes256 = "AES256" - - -class ListBlobsIncludeItem(str, Enum): - - copy = "copy" - deleted = "deleted" - metadata = "metadata" - snapshots = "snapshots" - uncommittedblobs = "uncommittedblobs" - - -class ListContainersIncludeType(str, Enum): - - metadata = "metadata" - - -class PathRenameMode(str, Enum): - - legacy = "legacy" - posix = "posix" - - -class SequenceNumberActionType(str, Enum): - - max = "max" - update = "update" - increment = "increment" - - -class SkuName(str, Enum): - - standard_lrs = "Standard_LRS" - standard_grs = "Standard_GRS" - standard_ragrs = "Standard_RAGRS" - standard_zrs = "Standard_ZRS" - premium_lrs = "Premium_LRS" - - -class AccountKind(str, Enum): - - storage = "Storage" - blob_storage = "BlobStorage" - storage_v2 = "StorageV2" - - -class SyncCopyStatusType(str, Enum): - - success = "success" diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_models.py deleted file mode 100644 index ea5d65d..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_models.py +++ /dev/null @@ -1,1528 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. 
- - All required parameters must be populated in order to send to Azure. - - :param start: Required. the date-time the policy is active - :type start: str - :param expiry: Required. the date-time the policy expires - :type expiry: str - :param permission: Required. the permissions for the acl policy - :type permission: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - 'permission': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class AppendPositionAccessConditions(Model): - """Additional parameters for a set of operations, such as: - AppendBlob_append_block, AppendBlob_append_block_from_url. - - :param max_size: Optional conditional header. The max length in bytes - permitted for the append blob. If the Append Block operation would cause - the blob to exceed that limit or if the blob size is already greater than - the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the - Append Block operation. A number indicating the byte offset to compare. - Append Block will succeed only if the append position is equal to this - number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}}, - 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = kwargs.get('max_size', None) - self.append_position = kwargs.get('append_position', None) - - -class BlobFlatListSegment(Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItem] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, **kwargs): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = kwargs.get('blob_items', None) - - -class BlobHierarchyListSegment(Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. 
- :type blob_items: list[~azure.storage.blob.models.BlobItem] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, **kwargs): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs.get('blob_items', None) - - -class BlobHTTPHeaders(Model): - """Additional parameters for a set of operations. - - :param blob_cache_control: Optional. Sets the blob's cache control. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note - that this hash is not validated, as the hashes for the individual blocks - were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's - Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}}, - 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}}, - 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}}, - 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}}, - 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}}, - 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = kwargs.get('blob_cache_control', None) - self.blob_content_type = kwargs.get('blob_content_type', None) - self.blob_content_md5 = kwargs.get('blob_content_md5', None) - self.blob_content_encoding = kwargs.get('blob_content_encoding', None) - self.blob_content_language = kwargs.get('blob_content_language', None) - self.blob_content_disposition = kwargs.get('blob_content_disposition', None) - - -class BlobItem(Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param properties: Required. 
- :type properties: ~azure.storage.blob.models.BlobProperties - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'properties': {'key': 'Properties', 'type': 'BlobProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, **kwargs): - super(BlobItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.deleted = kwargs.get('deleted', None) - self.snapshot = kwargs.get('snapshot', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - - -class BlobMetadata(Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are - deserialized this collection - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__(self, **kwargs): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = kwargs.get('additional_properties', None) - self.encrypted = kwargs.get('encrypted', None) - - -class BlobPrefix(Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - - -class BlobProperties(Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: datetime - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param content_length: Size in bytes - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: 'BlockBlob', 'PageBlob', - 'AppendBlob' - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: 'pending', 'success', - 'aborted', 'failed' - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15', - 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: - 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool' - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param access_tier_change_time: - :type access_tier_change_time: datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}}, - 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}}, - 
'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}}, - 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}}, - 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}}, - 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__(self, **kwargs): - super(BlobProperties, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.blob_type = kwargs.get('blob_type', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_status = kwargs.get('copy_status', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - self.server_encrypted = 
kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.archive_status = kwargs.get('archive_status', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - - -class Block(Model): - """Represents a single block in a block blob. It describes the block's ID and - size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Block, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.size = kwargs.get('size', None) - - -class BlockList(Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = kwargs.get('committed_blocks', None) - self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) - - -class BlockLookupList(Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__(self, **kwargs): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = kwargs.get('committed', None) - self.uncommitted = kwargs.get('uncommitted', None) - self.latest = kwargs.get('latest', None) - - -class ClearRange(Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__(self, **kwargs): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class ContainerItem(Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__(self, **kwargs): - super(ContainerItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - - -class ContainerProperties(Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: 'container', 'blob' - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.lease_status = 
kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.public_access = kwargs.get('public_access', None) - self.has_immutability_policy = kwargs.get('has_immutability_policy', None) - self.has_legal_hold = kwargs.get('has_legal_hold', None) - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs.get('allowed_origins', None) - self.allowed_methods = kwargs.get('allowed_methods', None) - self.allowed_headers = kwargs.get('allowed_headers', None) - self.exposed_headers = kwargs.get('exposed_headers', None) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) - - -class CpkInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_key: Optional. Specifies the encryption key to use to - encrypt the data provided in the request. If not specified, encryption is - performed with the root account encryption key. For more information, see - Encryption at Rest for Azure Storage Services. 
- :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption - key. Must be provided if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption - key hash. Currently, the only accepted value is "AES256". Must be provided - if the x-ms-encryption-key header is provided. Possible values include: - 'AES256' - :type encryption_algorithm: str or - ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}}, - 'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}}, - 'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = kwargs.get('encryption_key', None) - self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) - self.encryption_algorithm = kwargs.get('encryption_algorithm', None) - - -class DataLakeStorageError(Model): - """DataLakeStorageError. - - :param error: The service error response object. - :type error: ~azure.storage.blob.models.DataLakeStorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DataLakeStorageError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class DataLakeStorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'DataLakeStorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'DataLakeStorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(DataLakeStorageErrorException, self).__init__(response=response) - - -class DataLakeStorageErrorError(Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}}, - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DataLakeStorageErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - - -class DirectoryHttpHeaders(Model): - """Additional parameters for a set of operations, such as: Directory_create, - Directory_rename, Blob_rename. 
- - :param cache_control: Cache control for given resource - :type cache_control: str - :param content_type: Content type for given resource - :type content_type: str - :param content_encoding: Content encoding for given resource - :type content_encoding: str - :param content_language: Content language for given resource - :type content_language: str - :param content_disposition: Content disposition for given resource - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}}, - 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}}, - 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}}, - 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}}, - 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - - -class GeoReplication(Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible - values include: 'live', 'bootstrap', 'unavailable' - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All - primary writes preceding this value are guaranteed to be available for - read operations at the secondary. Primary writes after this point in time - may or may not be available for reads. - :type last_sync_time: datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(GeoReplication, self).__init__(**kwargs) - self.status = kwargs.get('status', None) - self.last_sync_time = kwargs.get('last_sync_time', None) - - -class KeyInfo(Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC - time - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC - time - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(KeyInfo, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. 
- :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsFlatSegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.container_name = kwargs.get('container_name', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListBlobsHierarchySegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.container_name = kwargs.get('container_name', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListContainersSegmentResponse(Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.container_items = kwargs.get('container_items', None) - self.next_marker = kwargs.get('next_marker', None) - - -class Logging(Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. 
Indicates whether all delete requests should be - logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be - logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be - logged. - :type write: bool - :param retention_policy: Required. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, - 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, - 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Logging, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.delete = kwargs.get('delete', None) - self.read = kwargs.get('read', None) - self.write = kwargs.get('write', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class Metrics(Model): - """a summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs.get('enabled', None) - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class ModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param if_modified_since: Specify this header value to operate only on a - blob if it has been modified since the specified date/time. - :type if_modified_since: datetime - :param if_unmodified_since: Specify this header value to operate only on a - blob if it has not been modified since the specified date/time. - :type if_unmodified_since: datetime - :param if_match: Specify an ETag value to operate only on blobs with a - matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs - without a matching value. 
- :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}}, - 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}}, - 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - - -class PageList(Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(PageList, self).__init__(**kwargs) - self.page_range = kwargs.get('page_range', None) - self.clear_range = kwargs.get('clear_range', None) - - -class PageRange(Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__(self, **kwargs): - super(PageRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class RetentionPolicy(Model): - """the retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the storage service - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.days = kwargs.get('days', None) - - -class SequenceNumberAccessConditions(Model): - """Additional parameters for a set of operations, such as: - PageBlob_upload_pages, PageBlob_clear_pages, - PageBlob_upload_pages_from_url. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value - to operate only on a blob if it has a sequence number less than or equal - to the specified. 
- :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate - only on a blob if it has a sequence number less than the specified. - :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate - only on a blob if it has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}}, - 'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}}, - 'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None) - self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None) - self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None) - - -class SignedIdentifier(Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id - :type id: str - :param access_policy: - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__(self, **kwargs): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param source_if_modified_since: Specify this header value to operate only - on a blob if it has been modified since the specified date/time. - :type source_if_modified_since: datetime - :param source_if_unmodified_since: Specify this header value to operate - only on a blob if it has not been modified since the specified date/time. - :type source_if_unmodified_since: datetime - :param source_if_match: Specify an ETag value to operate only on blobs - with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on - blobs without a matching value. 
- :type source_if_none_match: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}}, - 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}}, - 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}}, - 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - - -class StaticWebsite(Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a - static website - :type enabled: bool - :param index_document: The default name of the index page under each - directory - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page - :type error_document404_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.index_document = kwargs.get('index_document', None) - self.error_document404_path = kwargs.get('error_document404_path', None) - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage Service Properties. - - :param logging: - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to - the Blob service if an incoming request's version is not specified. 
- Possible values include version 2008-10-27 and all more recent versions - :type default_service_version: str - :param delete_retention_policy: - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = kwargs.get('logging', None) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.default_service_version = kwargs.get('default_service_version', None) - self.delete_retention_policy = kwargs.get('delete_retention_policy', None) - self.static_website = kwargs.get('static_website', None) - - -class StorageServiceStats(Model): - """Stats for the storage service. - - :param geo_replication: - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = kwargs.get('geo_replication', None) - - -class UserDelegationKey(Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID - format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID - format - :type signed_tid: str - :param signed_start: Required. The date-time the key is active - :type signed_start: datetime - :param signed_expiry: Required. The date-time the key expires - :type signed_expiry: datetime - :param signed_service: Required. Abbreviation of the Azure Storage service - that accepts the key - :type signed_service: str - :param signed_version: Required. The service version that created the key - :type signed_version: str - :param value: Required. 
The key as a base64 string - :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}}, - 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = kwargs.get('signed_oid', None) - self.signed_tid = kwargs.get('signed_tid', None) - self.signed_start = kwargs.get('signed_start', None) - self.signed_expiry = kwargs.get('signed_expiry', None) - self.signed_service = kwargs.get('signed_service', None) - self.signed_version = kwargs.get('signed_version', None) - self.value = kwargs.get('value', None) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_models_py3.py deleted file mode 100644 index 66f84b1..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/models/_models_py3.py +++ /dev/null @@ -1,1528 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. the date-time the policy is active - :type start: str - :param expiry: Required. the date-time the policy expires - :type expiry: str - :param permission: Required. the permissions for the acl policy - :type permission: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - 'permission': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str, expiry: str, permission: str, **kwargs) -> None: - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class AppendPositionAccessConditions(Model): - """Additional parameters for a set of operations, such as: - AppendBlob_append_block, AppendBlob_append_block_from_url. 
- - :param max_size: Optional conditional header. The max length in bytes - permitted for the append blob. If the Append Block operation would cause - the blob to exceed that limit or if the blob size is already greater than - the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the - Append Block operation. A number indicating the byte offset to compare. - Append Block will succeed only if the append position is equal to this - number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}}, - 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}}, - } - _xml_map = { - } - - def __init__(self, *, max_size: int=None, append_position: int=None, **kwargs) -> None: - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = max_size - self.append_position = append_position - - -class BlobFlatListSegment(Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItem] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, *, blob_items, **kwargs) -> None: - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = blob_items - - -class BlobHierarchyListSegment(Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItem] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, *, blob_items, blob_prefixes=None, **kwargs) -> None: - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobHTTPHeaders(Model): - """Additional parameters for a set of operations. - - :param blob_cache_control: Optional. Sets the blob's cache control. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note - that this hash is not validated, as the hashes for the individual blocks - were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. 
- If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's - Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}}, - 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}}, - 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}}, - 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}}, - 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}}, - 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, blob_cache_control: str=None, blob_content_type: str=None, blob_content_md5: bytearray=None, blob_content_encoding: str=None, blob_content_language: str=None, blob_content_disposition: str=None, **kwargs) -> None: - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = blob_cache_control - self.blob_content_type = blob_content_type - self.blob_content_md5 = blob_content_md5 - self.blob_content_encoding = blob_content_encoding - self.blob_content_language = blob_content_language - self.blob_content_disposition = blob_content_disposition - - -class BlobItem(Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param properties: Required. - :type properties: ~azure.storage.blob.models.BlobProperties - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'properties': {'key': 'Properties', 'type': 'BlobProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, *, name: str, deleted: bool, snapshot: str, properties, metadata=None, **kwargs) -> None: - super(BlobItem, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.properties = properties - self.metadata = metadata - - -class BlobMetadata(Model): - """BlobMetadata. 
- - :param additional_properties: Unmatched properties from the message are - deserialized this collection - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__(self, *, additional_properties=None, encrypted: str=None, **kwargs) -> None: - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.encrypted = encrypted - - -class BlobPrefix(Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - } - - def __init__(self, *, name: str, **kwargs) -> None: - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobProperties(Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: datetime - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: 'BlockBlob', 'PageBlob', - 'AppendBlob' - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: 'pending', 'success', - 'aborted', 'failed' - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15', - 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param 
archive_status: Possible values include: - 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool' - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param access_tier_change_time: - :type access_tier_change_time: datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}}, - 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}}, - 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}}, - 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}}, - 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 
'xml': {'name': 'AccessTierChangeTime'}}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, access_tier_change_time=None, **kwargs) -> None: - super(BlobProperties, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.blob_type = blob_type - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.copy_id = copy_id - self.copy_status = copy_status - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_inferred = access_tier_inferred - self.archive_status = archive_status - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.access_tier_change_time = access_tier_change_time - - -class Block(Model): - """Represents a single block in a block blob. It describes the block's ID and - size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}}, - } - _xml_map = { - } - - def __init__(self, *, name: str, size: int, **kwargs) -> None: - super(Block, self).__init__(**kwargs) - self.name = name - self.size = size - - -class BlockList(Model): - """BlockList. 
- - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, *, committed_blocks=None, uncommitted_blocks=None, **kwargs) -> None: - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = committed_blocks - self.uncommitted_blocks = uncommitted_blocks - - -class BlockLookupList(Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__(self, *, committed=None, uncommitted=None, latest=None, **kwargs) -> None: - super(BlockLookupList, self).__init__(**kwargs) - self.committed = committed - self.uncommitted = uncommitted - self.latest = latest - - -class ClearRange(Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class ContainerItem(Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__(self, *, name: str, properties, metadata=None, **kwargs) -> None: - super(ContainerItem, self).__init__(**kwargs) - self.name = name - self.properties = properties - self.metadata = metadata - - -class ContainerProperties(Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: 'container', 'blob' - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, - } - _xml_map = { - } - - def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=None, lease_duration=None, public_access=None, has_immutability_policy: bool=None, has_legal_hold: bool=None, **kwargs) -> None: - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.public_access = public_access - self.has_immutability_policy = has_immutability_policy - self.has_legal_hold = has_legal_hold - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. 
The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class CpkInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_key: Optional. Specifies the encryption key to use to - encrypt the data provided in the request. If not specified, encryption is - performed with the root account encryption key. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption - key. Must be provided if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption - key hash. Currently, the only accepted value is "AES256". Must be provided - if the x-ms-encryption-key header is provided. Possible values include: - 'AES256' - :type encryption_algorithm: str or - ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}}, - 'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}}, - 'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}}, - } - _xml_map = { - } - - def __init__(self, *, encryption_key: str=None, encryption_key_sha256: str=None, encryption_algorithm=None, **kwargs) -> None: - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = encryption_key - self.encryption_key_sha256 = encryption_key_sha256 - self.encryption_algorithm = encryption_algorithm - - -class DataLakeStorageError(Model): - """DataLakeStorageError. - - :param error: The service error response object. 
- :type error: ~azure.storage.blob.models.DataLakeStorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}}, - } - _xml_map = { - } - - def __init__(self, *, error=None, **kwargs) -> None: - super(DataLakeStorageError, self).__init__(**kwargs) - self.error = error - - -class DataLakeStorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'DataLakeStorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'DataLakeStorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(DataLakeStorageErrorException, self).__init__(response=response) - - -class DataLakeStorageErrorError(Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}}, - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None: - super(DataLakeStorageErrorError, self).__init__(**kwargs) - self.code = code - self.message = message - - -class DirectoryHttpHeaders(Model): - """Additional parameters for a set of operations, such as: Directory_create, - Directory_rename, Blob_rename. - - :param cache_control: Cache control for given resource - :type cache_control: str - :param content_type: Content type for given resource - :type content_type: str - :param content_encoding: Content encoding for given resource - :type content_encoding: str - :param content_language: Content language for given resource - :type content_language: str - :param content_disposition: Content disposition for given resource - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}}, - 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}}, - 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}}, - 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}}, - 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, cache_control: str=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, **kwargs) -> None: - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - - -class GeoReplication(Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible - values include: 'live', 'bootstrap', 'unavailable' - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All - primary writes preceding this value are guaranteed to be available for - read operations at the secondary. 
Primary writes after this point in time - may or may not be available for reads. - :type last_sync_time: datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, - } - _xml_map = { - } - - def __init__(self, *, status, last_sync_time, **kwargs) -> None: - super(GeoReplication, self).__init__(**kwargs) - self.status = status - self.last_sync_time = last_sync_time - - -class KeyInfo(Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC - time - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC - time - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str, expiry: str, **kwargs) -> None: - super(KeyInfo, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, *, lease_id: str=None, **kwargs) -> None: - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsFlatSegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None: - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListBlobsHierarchySegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, delimiter: str=None, next_marker: str=None, **kwargs) -> None: - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ListContainersSegmentResponse(Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_items, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None: - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.container_items = container_items - self.next_marker = next_marker - - -class Logging(Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. 
Indicates whether all delete requests should be - logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be - logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be - logged. - :type write: bool - :param retention_policy: Required. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, - 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, - 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None: - super(Logging, self).__init__(**kwargs) - self.version = version - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy - - -class Metrics(Model): - """a summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None: - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class ModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param if_modified_since: Specify this header value to operate only on a - blob if it has been modified since the specified date/time. - :type if_modified_since: datetime - :param if_unmodified_since: Specify this header value to operate only on a - blob if it has not been modified since the specified date/time. - :type if_unmodified_since: datetime - :param if_match: Specify an ETag value to operate only on blobs with a - matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs - without a matching value. 
- :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}}, - 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}}, - 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}}, - } - _xml_map = { - } - - def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, **kwargs) -> None: - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - - -class PageList(Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}}, - } - _xml_map = { - } - - def __init__(self, *, page_range=None, clear_range=None, **kwargs) -> None: - super(PageList, self).__init__(**kwargs) - self.page_range = page_range - self.clear_range = clear_range - - -class PageRange(Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(PageRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class RetentionPolicy(Model): - """the retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the storage service - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class SequenceNumberAccessConditions(Model): - """Additional parameters for a set of operations, such as: - PageBlob_upload_pages, PageBlob_clear_pages, - PageBlob_upload_pages_from_url. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value - to operate only on a blob if it has a sequence number less than or equal - to the specified. 
- :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate - only on a blob if it has a sequence number less than the specified. - :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate - only on a blob if it has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}}, - 'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}}, - 'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}}, - } - _xml_map = { - } - - def __init__(self, *, if_sequence_number_less_than_or_equal_to: int=None, if_sequence_number_less_than: int=None, if_sequence_number_equal_to: int=None, **kwargs) -> None: - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to - self.if_sequence_number_less_than = if_sequence_number_less_than - self.if_sequence_number_equal_to = if_sequence_number_equal_to - - -class SignedIdentifier(Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id - :type id: str - :param access_policy: - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param source_if_modified_since: Specify this header value to operate only - on a blob if it has been modified since the specified date/time. - :type source_if_modified_since: datetime - :param source_if_unmodified_since: Specify this header value to operate - only on a blob if it has not been modified since the specified date/time. - :type source_if_unmodified_since: datetime - :param source_if_match: Specify an ETag value to operate only on blobs - with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on - blobs without a matching value. 
- :type source_if_none_match: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}}, - 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}}, - 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}}, - 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}}, - } - _xml_map = { - } - - def __init__(self, *, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match: str=None, source_if_none_match: str=None, **kwargs) -> None: - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - - -class StaticWebsite(Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a - static website - :type enabled: bool - :param index_document: The default name of the index page under each - directory - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page - :type error_document404_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, index_document: str=None, error_document404_path: str=None, **kwargs) -> None: - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = enabled - self.index_document = index_document - self.error_document404_path = error_document404_path - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, *, message: str=None, **kwargs) -> None: - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage Service Properties. - - :param logging: - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to - the Blob service if an incoming request's version is not specified. 
- Possible values include version 2008-10-27 and all more recent versions - :type default_service_version: str - :param delete_retention_policy: - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}}, - } - _xml_map = { - } - - def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, default_service_version: str=None, delete_retention_policy=None, static_website=None, **kwargs) -> None: - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = logging - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.default_service_version = default_service_version - self.delete_retention_policy = delete_retention_policy - self.static_website = static_website - - -class StorageServiceStats(Model): - """Stats for the storage service. - - :param geo_replication: - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, - } - _xml_map = { - } - - def __init__(self, *, geo_replication=None, **kwargs) -> None: - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = geo_replication - - -class UserDelegationKey(Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID - format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID - format - :type signed_tid: str - :param signed_start: Required. The date-time the key is active - :type signed_start: datetime - :param signed_expiry: Required. The date-time the key expires - :type signed_expiry: datetime - :param signed_service: Required. Abbreviation of the Azure Storage service - that accepts the key - :type signed_service: str - :param signed_version: Required. The service version that created the key - :type signed_version: str - :param value: Required. 
The key as a base64 string - :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}}, - 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - } - - def __init__(self, *, signed_oid: str, signed_tid: str, signed_start, signed_expiry, signed_service: str, signed_version: str, value: str, **kwargs) -> None: - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = signed_oid - self.signed_tid = signed_tid - self.signed_start = signed_start - self.signed_expiry = signed_expiry - self.signed_service = signed_service - self.signed_version = signed_version - self.value = value diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/__init__.py deleted file mode 100644 index 1ea0453..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_append_blob_operations.py deleted file mode 100644 index c06ec34..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_append_blob_operations.py +++ /dev/null @@ -1,539 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class AppendBlobOperations(object): - """AppendBlobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". - :ivar comp: . Constant value: "appendblock". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "AppendBlob" - self.comp = "appendblock" - - def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-blob-type'] = 
self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': 
self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append - Block is supported only on version 2015-02-21 version or later. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = 
None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.append_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block.metadata = {'url': '/{containerName}/{blob}'} - - def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob where the contents are read from a source url. The - Append Block operation is permitted only if the blob was created with - x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.append_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') - - # Construct headers - header_parameters = {} - 
header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_blob_operations.py deleted file mode 100644 index 2822fcc..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_blob_operations.py +++ /dev/null @@ -1,2417 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class BlobOperations(object): - """BlobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_requires_sync: . Constant value: "true". - :ivar x_ms_copy_action: . Constant value: "abort". - :ivar restype: . Constant value: "account". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_requires_sync = "true" - self.x_ms_copy_action = "abort" - self.restype = "account" - - def download(self, snapshot=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Download operation reads or downloads a blob from the system, - including its metadata and properties. 
You can also call Download to - read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param range_get_content_md5: When set to true and specified together - with the Range, the service returns the MD5 hash for the range, as - long as the range is less than or equal to 4 MB in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified - together with the Range, the service returns the CRC64 hash for the - range, as long as the range is less than or equal to 4 MB in size. - :type range_get_content_crc64: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - 
header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, 
response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', 
response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} - - def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Properties operation returns all user-defined metadata, - standard HTTP properties, and system properties for the blob. It does - not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is 
not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')), - 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': 
self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')), - 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), - 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), - 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}/{blob}'} - - def delete(self, snapshot=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """If the storage account's soft delete feature is disabled then, when a - blob is deleted, it is permanently removed from the storage account. If - the storage account's soft delete feature is enabled, then, when a blob - is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for - the number of days specified by the DeleteRetentionPolicy section of - [Storage service properties] (Set-Blob-Service-Properties.md). After - the specified number of days has passed, the blob's data is permanently - removed from the storage account. Note that you continue to be charged - for the soft-deleted blob's storage until it is permanently removed. - Use the List Blobs API and specify the "include=deleted" query - parameter to discover which blobs and snapshots have been soft deleted. - You can then use the Undelete Blob API to restore a soft-deleted blob. - All other operations on a soft-deleted blob or snapshot cause the - service to return an HTTP status code of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param delete_snapshots: Required if the blob has associated - snapshots. Specify one of the following two options: include: Delete - the base blob and all of its snapshots. only: Delete only the blob's - snapshots and not the blob itself. Possible values include: 'include', - 'only' - :type delete_snapshots: str or - ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
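
# --- Editorial sketch (not part of the deleted source above) ---------------
# The delete() operation above maps its arguments onto a plain HTTP DELETE
# against the blob URL. A minimal illustration of the equivalent raw request,
# assuming the `requests` package and a hypothetical `sas_url` that already
# carries a SAS token:
import requests

def delete_blob_and_snapshots(sas_url, timeout=30):
    params = {'timeout': timeout}
    headers = {
        'x-ms-version': '2019-07-07',        # analogous to self._config.version
        'x-ms-delete-snapshots': 'include',  # 'include' removes the base blob and its snapshots,
                                             # 'only' removes just the snapshots (see docstring above)
    }
    response = requests.delete(sas_url, params=params, headers=headers)
    response.raise_for_status()              # the generated client expects 202 Accepted here
    return response.headers.get('x-ms-request-id')
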
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}/{blob}'} - - def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
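
# --- Editorial sketch (not part of the deleted source above) ---------------
# The x-ms-acl value documented above is a comma-separated list of entries of
# the form "[scope:][type]:[id]:[permissions]". A hypothetical helper that
# splits such a string into its parts, for illustration only:
def parse_posix_acl(acl):
    entries = []
    for ace in acl.split(','):
        parts = ace.strip().split(':')
        if parts[0] == 'default':            # optional scope prefix
            scope, parts = 'default', parts[1:]
        else:
            scope = 'access'
        ace_type, identifier, permissions = parts
        entries.append({'scope': scope, 'type': ace_type,
                        'id': identifier, 'permissions': permissions})
    return entries

# parse_posix_acl("user::rwx,group::r-x,other::---") yields three access-scope
# entries; the empty id fields refer to the owning user and group.
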
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """Rename a blob/file. By default, the destination is overwritten and if - the destination already exists and has a lease, the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesystem}/{path}". 
If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for the file - and directory, and will only be applied when the default ACL does not - exist in the parent directory. If the umask bit is set, it means that - the corresponding permission will be disabled. Otherwise the - corresponding permission will be determined by the permission. A - 4-digit octal notation (e.g. 0022) is supported here. If no umask was - specified, a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must - match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = 
self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - def undelete(self, timeout=None, request_id=None, cls=None, **kwargs): - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.undelete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - undelete.metadata = {'url': '/{containerName}/{blob}'} - - def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
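
# --- Editorial sketch (not part of the deleted source above) ---------------
# The blob_content_md5 value handled further below is typed as 'bytearray' and
# ends up on the wire base64-encoded (the x-ms-blob-content-md5 header). A
# sketch of producing that value for content you are about to upload, assuming
# `data` holds the blob body as bytes:
import base64
import hashlib

def content_md5_header(data):
    digest = hashlib.md5(data).digest()      # raw 16-byte MD5 of the payload
    return base64.b64encode(digest).decode('ascii')

# content_md5_header(b"hello") -> 'XUFAKrxLKna5cZ2REBfFkg=='
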
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = 
self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} - - def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set Blob Metadata operation sets user-defined metadata for the - specified blob as one or more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. 
See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - 
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}/{blob}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
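
# --- Editorial sketch (not part of the deleted source above) ---------------
# A sketch of the headers the acquire_lease() operation ends up sending for an
# infinite lease with a caller-chosen lease ID. uuid4() satisfies the
# GUID-string requirement for x-ms-proposed-lease-id; the version string is an
# assumed example.
import uuid

def build_acquire_lease_headers(duration=-1, version='2019-07-07'):
    if duration != -1 and not 15 <= duration <= 60:
        raise ValueError("duration must be -1 (infinite) or between 15 and 60 seconds")
    return {
        'x-ms-lease-action': 'acquire',
        'x-ms-lease-duration': str(duration),
        'x-ms-proposed-lease-id': str(uuid.uuid4()),
        'x-ms-version': version,
    }
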
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} - - def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - 
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}/{blob}'} - - def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', 
response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}/{blob}'} - - def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}/{blob}'} - - def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 
'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}/{blob}'} - - def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} - - def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """The Start Copy From URL operation copies a blob or an internet resource - to a new blob. - - :param copy_source: Specifies the name of the source page blob - snapshot. 
This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = 
modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """The Copy From URL operation copies a blob or an internet resource to a - new blob. It will not return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = 
self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """The Abort Copy From URL operation aborts a pending Copy From URL - operation, and leaves a destination blob with zero length and full - metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """The Set Tier operation sets the tier on a blob. The operation is - allowed on a page blob in a premium storage account and on a block blob - in a blob storage account (locally redundant storage only). A premium - page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. Possible values - include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', - 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param timeout: The timeout parameter is expressed in seconds. 
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - - # Construct URL - url = self.set_tier.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tier.metadata = {'url': '/{containerName}/{blob}'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . 
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_block_blob_operations.py deleted file mode 100644 index fa76c8c..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_block_blob_operations.py +++ /dev/null @@ -1,765 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class BlockBlobOperations(object): - """BlockBlobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. 
Constant value: "BlockBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "BlockBlob" - - def upload(self, body, content_length, timeout=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Upload Block Blob operation updates the content of an existing - block blob. Updating an existing block blob overwrites any existing - metadata on the blob. Partial updates are not supported with Put Blob; - the content of the existing blob is overwritten with the content of the - new blob. To perform a partial update of the content of a block blob, - use the Put Block List operation. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.upload.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id 
is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', 
response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload.metadata = {'url': '/{containerName}/{blob}'} - - def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data - :type body: Generator - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - - comp = "block" - - # Construct URL - url = self.stage_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if 
transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block.metadata = {'url': '/{containerName}/{blob}'} - - def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, lease_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob where the contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. 
- :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "block" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = 
self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, 
blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. In order to be written as part of a - blob, a block must have been successfully written to the server in a - prior Put Block operation. You can call Put Block List to update a blob - by uploading only those blocks that have changed, then committing the - new and existing blocks together. You can do this by specifying whether - to commit a block from the committed block list or from the uncommitted - block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "blocklist" - - # Construct URL - url = self.commit_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct body - body_content = self._serialize.body(blocks, 'BlockLookupList') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 
'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} - - def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param list_type: Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlockList or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "blocklist" - - # Construct URL - url = self.get_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlockList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_container_operations.py deleted file mode 100644 index b9dee57..0000000 --- 
a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_container_operations.py +++ /dev/null @@ -1,1310 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ContainerOperations(object): - """ContainerOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - def create(self, timeout=None, metadata=None, access=None, request_id=None, cls=None, **kwargs): - """creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}'} - - def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """returns all user-defined metadata and system properties for the - specified container. The data returned does not include the container's - list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')), - 'x-ms-has-legal-hold': self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}'} - - def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """operation marks the specified container for deletion. 
The container and - any blobs contained within it are later deleted during garbage - collection. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', 
response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}'} - - def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """operation sets one or more user-defined name-value pairs for the - specified container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - - restype = "container" - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - - # Construct and send request - request = 
self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}'} - - def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """gets the permissions for the specified container. The permissions - indicate whether container data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} - - def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """sets the permissions for the specified container. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param container_acl: the acls for the container - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{containerName}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}'} - - def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}'} - - def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. 
The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': 
self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}'} - - def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}'} - - def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}'} - - def list_blob_flat_segment(self, prefix=None, 
marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
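The ``marker``/``NextMarker`` handshake described above is a plain continuation-token loop. A sketch of driving it directly against the REST endpoint (``requests`` and the SAS-bearing ``container_url`` are assumptions; the XML element names follow the documented List Blobs response and are not taken from this file)::

    import requests
    import xml.etree.ElementTree as ET

    container_url = "https://<account>.blob.core.windows.net/<container>?<sas-token>"  # placeholder

    def iter_blob_names(prefix=None, page_size=1000):
        # page through the flat listing until the service stops returning NextMarker
        marker = None
        while True:
            params = {"restype": "container", "comp": "list", "maxresults": page_size}
            if prefix:
                params["prefix"] = prefix
            if marker:
                params["marker"] = marker
            resp = requests.get(container_url, params=params,
                                headers={"x-ms-version": "2019-02-02"})
            resp.raise_for_status()
            root = ET.fromstring(resp.content)
            for name in root.findall("./Blobs/Blob/Name"):
                yield name.text
            marker = root.findtext("NextMarker") or ""
            if not marker:
                break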
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsFlatSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} - - def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. 
- - :param delimiter: When the request includes this parameter, the - operation returns a BlobPrefix element in the response body that acts - as a placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character. The - delimiter may be a single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
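With a ``delimiter``, the same listing request folds blobs sharing a prefix up to the delimiter into ``BlobPrefix`` placeholders, which is how "virtual directories" are surfaced. A short variation on the sketch above, under the same assumptions::

    import requests
    import xml.etree.ElementTree as ET

    container_url = "https://<account>.blob.core.windows.net/<container>?<sas-token>"  # placeholder

    resp = requests.get(container_url,
                        params={"restype": "container", "comp": "list",
                                "delimiter": "/", "prefix": "photos/"},
                        headers={"x-ms-version": "2019-02-02"})
    resp.raise_for_status()
    root = ET.fromstring(resp.content)
    virtual_dirs = [p.text for p in root.findall("./Blobs/BlobPrefix/Name")]
    blobs = [b.text for b in root.findall("./Blobs/Blob/Name")]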
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsHierarchySegmentResponse or the result of - cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . 
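``get_account_info`` is a plain ``GET`` with ``restype=account&comp=properties``; the useful output travels entirely in response headers. A sketch under the same assumptions (``requests``, SAS-bearing placeholder URL)::

    import requests

    container_url = "https://<account>.blob.core.windows.net/<container>?<sas-token>"  # placeholder

    resp = requests.get(container_url,
                        params={"restype": "account", "comp": "properties"},
                        headers={"x-ms-version": "2019-02-02"})
    resp.raise_for_status()
    sku_name = resp.headers.get("x-ms-sku-name")          # e.g. "Standard_LRS"
    account_kind = resp.headers.get("x-ms-account-kind")  # e.g. "StorageV2"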
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/{containerName}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_directory_operations.py deleted file mode 100644 index bfaf633..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,740 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar resource: . Constant value: "directory". 
- """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "directory" - - def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Create a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - - # Construct headers - header_parameters = {} - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: 
- header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """Rename a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. 
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
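Renaming a large directory is a loop: each ``PUT`` on the destination path carries ``x-ms-rename-source``, and if the service cannot finish in one call it returns an ``x-ms-continuation`` header that must be echoed back as the ``continuation`` query parameter. A sketch under the same assumptions as the earlier snippets::

    import requests

    def rename_directory(dest_url, rename_source, api_version="2019-02-02"):
        # rename_source is the "/{filesystem}/{path}" of the existing directory
        continuation = None
        while True:
            params = {"continuation": continuation} if continuation else {}
            resp = requests.put(dest_url, params=params,
                                headers={"x-ms-version": api_version,
                                         "x-ms-rename-source": rename_source})
            resp.raise_for_status()
            continuation = resp.headers.get("x-ms-continuation")
            if not continuation:
                break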
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct 
headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 
'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the - directory will be deleted. If "false" and the directory is non-empty, - an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
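Directory delete follows the same continuation pattern as rename, with ``recursive`` deciding whether a non-empty directory is allowed. A compact sketch (same assumptions)::

    import requests

    def delete_directory(dir_url, recursive=True, api_version="2019-02-02"):
        # loop on x-ms-continuation until the whole tree has been removed
        continuation = None
        while True:
            params = {"recursive": str(recursive).lower()}
            if continuation:
                params["continuation"] = continuation
            resp = requests.delete(dir_url, params=params,
                                   headers={"x-ms-version": api_version})
            resp.raise_for_status()
            continuation = resp.headers.get("x-ms-continuation")
            if not continuation:
                break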
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
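``set_access_control`` is a ``PATCH`` with ``action=setAccessControl``; owner, group, permissions, and the ACL string in the "[scope:][type]:[id]:[permissions]" format described above all travel as request headers. A sketch under the same placeholder assumptions::

    import requests

    dir_url = "https://<account>.dfs.core.windows.net/<filesystem>/<path>?<sas-token>"  # placeholder

    resp = requests.patch(dir_url,
                          params={"action": "setAccessControl"},
                          headers={"x-ms-version": "2019-02-02",
                                   "x-ms-acl": "user::rwx,group::r-x,other::---"})
    resp.raise_for_status()  # expect 200 OK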
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_page_blob_operations.py deleted file mode 100644 index 53724a0..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/operations/_page_blob_operations.py +++ /dev/null @@ -1,1302 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class PageBlobOperations(object): - """PageBlobOperations operations. 
- - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "PageBlob" - - def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80' - :type tier: str or - ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
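A hedged usage sketch for the Create operation whose parameters are documented here; it is not part of the original file. `blob_client.page_blob` is assumed to be the PageBlobOperations instance that the generated blob client builds and attaches.

    # Allocate an empty 1 MiB page blob; the size must be 512-byte aligned and the
    # request carries no body, so content_length is 0.
    blob_client.page_blob.create(
        content_length=0,
        blob_content_length=1024 * 1024,
        tier='P10',                # optional premium tier from the documented list
        blob_sequence_number=0,
    )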
- :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - 
header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
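A short sketch of Upload Pages, under the same assumption that `blob_client.page_blob` is the attached PageBlobOperations instance; `data` is an illustrative byte stream, not a value from the original code.

    # Write the first 512-byte page; the HTTP-style range must cover exactly
    # content_length bytes and be 512-aligned.
    data = iter([b'\x00' * 512])
    blob_client.page_blob.upload_pages(
        body=data,
        content_length=512,
        range='bytes=0-511',
    )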
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if 
range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 
'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages.metadata = {'url': '/{containerName}/{blob}'} - - def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - 
if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "page" - page_write = "clear" - - # Construct URL - url = self.clear_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', 
response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - clear_pages.metadata = {'url': '/{containerName}/{blob}'} - - def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The - length of this range should match the ContentLength header and - x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be - written. The range should be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
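A sketch of the URL-sourced variant documented here, again assuming the `blob_client.page_blob` handle; the source URL is a placeholder, not a value from this file.

    # Copy one 512-byte page from a readable source blob URL into the destination;
    # source and destination ranges must be the same length and 512-aligned.
    blob_client.page_blob.upload_pages_from_url(
        source_url='https://account.blob.core.windows.net/container/source-blob',
        source_range='bytes=0-511',
        content_length=512,
        range='bytes=0-511',
    )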
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = 
self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", 
source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Page Ranges operation returns the list of valid page ranges for - a page blob or snapshot of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
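A sketch of Get Page Ranges under the same assumed `blob_client.page_blob` handle; the `page_range`, `start`, and `end` attribute names are assumptions based on the PageList/PageRange models rather than anything shown in this file.

    # List the committed page ranges of the first 1 MiB of the blob.
    page_list = blob_client.page_blob.get_page_ranges(range='bytes=0-1048575')
    for page in page_list.page_range:      # each entry exposes start/end byte offsets
        print(page.start, page.end)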
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = 
self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} - - def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Page Ranges Diff operation returns the list of valid page - ranges for a page blob that were changed between target blob and - previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The - prevsnapshot parameter is a DateTime value that specifies that the - response will contain only pages that were changed between target blob - and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot - specified by prevsnapshot is the older of the two. Note that - incremental snapshots are currently supported only for blobs created - on or after January 1, 2016. - :type prevsnapshot: str - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
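A sketch of the diff variant documented here, with the same assumed handle; only pages changed since the prevsnapshot timestamp are returned, and the timestamp below is a placeholder.

    # Compare the current blob against an earlier snapshot; changed pages include
    # both updated and cleared ranges.
    diff = blob_client.page_blob.get_page_ranges_diff(
        prevsnapshot='2019-01-01T00:00:00.0000000Z',
        range='bytes=0-1048575',
    )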
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} - - def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
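A one-line sketch of Resize with the same assumed `blob_client.page_blob` handle; the new size, like the original, must be 512-byte aligned.

    # Grow the page blob to 2 MiB; shrinking instead discards pages beyond the new length.
    blob_client.page_blob.resize(blob_content_length=2 * 1024 * 1024)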
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "properties" - - # Construct URL - url = self.resize.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - 
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - resize.metadata = {'url': '/{containerName}/{blob}'} - - def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the - x-ms-blob-sequence-number header is set for the request. This property - applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. Possible values include: - 'max', 'update', 'increment' - :type sequence_number_action: str or - ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "properties" - - # Construct URL - url = self.update_sequence_number.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', 
response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} - - def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """The Copy Incremental operation copies a snapshot of the source page - blob to a destination page blob. The snapshot is copied such that only - the differential changes between the previously copied snapshot are - transferred to the destination. The copied snapshots are complete - copies of the original snapshot and can be read or copied from as - usual. This API is supported since REST version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - comp = "incrementalcopy" - - # Construct URL - url = self.copy_incremental.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, 
response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/version.py b/azure/multiapi/storagev2/blob/v2019_02_02/_generated/version.py deleted file mode 100644 index 9c89a27..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/version.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -VERSION = "2019-02-02" - diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_lease.py b/azure/multiapi/storagev2/blob/v2019_02_02/_lease.py deleted file mode 100644 index 92dd0a3..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_lease.py +++ /dev/null @@ -1,311 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated.models import StorageErrorException, LeaseAccessConditions -from ._serialize import get_modify_conditions - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -class BlobLeaseClient(object): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.BlobClient or - ~azure.storage.blob.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. 
- """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'blob_name'): - self._client = client._client.blob # type: ignore # pylint: disable=protected-access - elif hasattr(client, 'container_name'): - self._client = client._client.container # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use either BlobClient or ContainerClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = kwargs.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. 
- A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_models.py b/azure/multiapi/storagev2/blob/v2019_02_02/_models.py deleted file mode 100644 index 5b0a61f..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_models.py +++ /dev/null @@ -1,1066 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum -from typing import List, Any, TYPE_CHECKING # pylint: disable=unused-import - -from azure.core.paging import PageIterator, ItemPaged - -from ._shared import decode_base64_to_text -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Logging as GeneratedLogging -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import StaticWebsite as GeneratedStaticWebsite -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import StorageErrorException -from ._generated.models import BlobPrefix as GenBlobPrefix -from ._generated.models import BlobItem - - -class BlobType(str, Enum): - - BlockBlob = "BlockBlob" - PageBlob = "PageBlob" - AppendBlob = "AppendBlob" - - -class BlockState(str, Enum): - """Block blob block types.""" - - Committed = 'Committed' #: Committed blocks. - Latest = 'Latest' #: Latest blocks. - Uncommitted = 'Uncommitted' #: Uncommitted blocks. 
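For readers tracking what this release drops: the `_lease.py` module removed above backed the public `BlobLeaseClient` (acquire/renew/release/change/break). The following is a minimal usage sketch only, written against one of the blob API versions this release keeps (assumed here to be `v2019_07_07`, whose lease surface mirrors the removed one); the import path, the `from_connection_string` helper, the `upload_blob(..., lease=...)` call, and every connection/container/blob name are illustrative assumptions, not part of this diff.

# Hedged sketch: assumes the retained v2019_07_07 package mirrors the upstream
# azure-storage-blob surface; the connection string and names are placeholders.
from azure.multiapi.storagev2.blob.v2019_07_07 import BlobClient, BlobLeaseClient

blob_client = BlobClient.from_connection_string(
    conn_str="<storage-connection-string>",   # placeholder
    container_name="my-container",            # placeholder
    blob_name="hello.txt",                    # placeholder
)

# BlobLeaseClient wraps the generated acquire/renew/release/change/break
# operations shown in the removed _lease.py.
lease = BlobLeaseClient(blob_client)
lease.acquire(lease_duration=15)              # finite leases must be 15-60 seconds

try:
    # Pass the lease so the service accepts writes while the blob is leased.
    blob_client.upload_blob(b"new data", overwrite=True, lease=lease)
finally:
    lease.release()                           # lets another client acquire immediately

Using the client as a context manager (`with BlobLeaseClient(blob_client) as lease: ...`) gives the same release-on-exit behavior via the `__enter__`/`__exit__` methods visible in the removed code.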
- - -class StandardBlobTier(str, Enum): - """ - Specifies the blob tier to set the blob to. This is only applicable for - block blobs on standard storage accounts. - """ - - Archive = 'Archive' #: Archive - Cool = 'Cool' #: Cool - Hot = 'Hot' #: Hot - - -class PremiumPageBlobTier(str, Enum): - """ - Specifies the page blob tier to set the blob to. This is only applicable to page - blobs on premium storage accounts. Please take a look at: - https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughput per PageBlobTier. - """ - - P4 = 'P4' #: P4 Tier - P6 = 'P6' #: P6 Tier - P10 = 'P10' #: P10 Tier - P20 = 'P20' #: P20 Tier - P30 = 'P30' #: P30 Tier - P40 = 'P40' #: P40 Tier - P50 = 'P50' #: P50 Tier - P60 = 'P60' #: P60 Tier - - -class SequenceNumberAction(str, Enum): - """Sequence number actions.""" - - Increment = 'increment' - """ - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - """ - - Max = 'max' - """ - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - """ - - Update = 'update' - """Sets the sequence number to the value included with the request.""" - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the container may be accessed publicly and the level of access. - """ - - OFF = 'off' - """ - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - """ - - Blob = 'blob' - """ - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - """ - - Container = 'container' - """ - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - """ - - -class BlobAnalyticsLogging(GeneratedLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. 
- """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Blob service. - The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GeneratedStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. 
- """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - else: - self.index_document = None - self.error_document404_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - ) - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ContainerProperties(DictMixin): - """Blob container's properties class. - - Returned ``ContainerProperties`` instances expose these values through a - dictionary interface, for example: ``container_props["last_modified"]``. - Additionally, the container name is available as ``container_props["name"]``. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the container. - :ivar str public_access: Specifies whether data in the container may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the container has an immutability policy. 
- :ivar bool has_legal_hold: - Represents whether the container has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - container as metadata. - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.lease = LeaseProperties(**kwargs) - self.public_access = kwargs.get('x-ms-blob-public-access') - self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') - self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') - self.metadata = kwargs.get('metadata') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = generated.properties.public_access - props.has_immutability_policy = generated.properties.has_immutability_policy - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - return props - - -class ContainerPropertiesPaged(PageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class BlobProperties(DictMixin): - """ - Blob Properties. - - :ivar str name: - The name of the blob. - :ivar str container: - The container in which the blob resides. - :ivar str snapshot: - Datetime value that uniquely identifies the blob snapshot. - :ivar ~azure.blob.storage.BlobType blob_type: - String indicating this blob's type. - :ivar dict metadata: - Name-value pairs associated with the blob as metadata. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - The size of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar ~azure.storage.blob.StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. - :ivar ~datetime.datetime blob_tier_change_time: - Indicates when the access tier was last changed. 
- :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar bool deleted: - Whether this blob was deleted. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - :ivar ~datetime.datetime creation_time: - Indicates when the blob was created, in UTC. - :ivar str archive_status: - Archive status of blob. - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.container = None - self.snapshot = kwargs.get('x-ms-snapshot') - self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None - self.metadata = kwargs.get('metadata') - self.encrypted_metadata = kwargs.get('encrypted_metadata') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') - self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.blob_tier = kwargs.get('x-ms-access-tier') - self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') - self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') - self.deleted = False - self.deleted_time = None - self.remaining_retention_days = None - self.creation_time = kwargs.get('x-ms-creation-time') - self.archive_status = kwargs.get('x-ms-archive-status') - self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') - self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') - - @classmethod - def _from_generated(cls, generated): - blob = BlobProperties() - blob.name = generated.name - blob_type = get_enum_value(generated.properties.blob_type) - blob.blob_type = BlobType(blob_type) if blob_type else None - blob.etag = generated.properties.etag - blob.deleted = generated.deleted - blob.snapshot = generated.snapshot - blob.metadata = generated.metadata.additional_properties if generated.metadata else {} - blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None - blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access - blob.last_modified = generated.properties.last_modified - blob.creation_time = generated.properties.creation_time - blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access - blob.size = generated.properties.content_length - blob.page_blob_sequence_number = generated.properties.blob_sequence_number - blob.server_encrypted = generated.properties.server_encrypted - blob.deleted_time = generated.properties.deleted_time - blob.remaining_retention_days = generated.properties.remaining_retention_days - blob.blob_tier = generated.properties.access_tier - blob.blob_tier_inferred = generated.properties.access_tier_inferred - blob.archive_status = generated.properties.archive_status - blob.blob_tier_change_time = generated.properties.access_tier_change_time - return blob - - 
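The model classes deleted in this hunk (`BlobProperties`, the `*Paged` iterators, and the `DictMixin`-based helpers) are what callers receive when listing blobs. For reference, a minimal listing sketch follows; it again assumes the retained `v2019_07_07` package exposes the same `ContainerClient`/`list_blobs` surface as upstream azure-storage-blob, and the connection string, container name, and prefix are placeholders rather than anything defined in this diff.

# Hedged sketch: assumes ContainerClient/list_blobs match the upstream SDK;
# connection string, container name, and name prefix are placeholders.
from azure.multiapi.storagev2.blob.v2019_07_07 import ContainerClient

container = ContainerClient.from_connection_string(
    conn_str="<storage-connection-string>",   # placeholder
    container_name="my-container",            # placeholder
)

# list_blobs() pages results through a BlobPropertiesPaged iterator and yields
# BlobProperties items; DictMixin also allows dictionary-style access.
for blob in container.list_blobs(name_starts_with="logs/"):
    print(blob.name, blob.size, blob.blob_type)
    print(blob["last_modified"], blob.get("blob_tier"))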
-class BlobPropertiesPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItem): - blob = BlobProperties._from_generated(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefix(ItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. 
- :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str next_marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - def _extract_data_cb(self, get_next_return): - continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class LeaseProperties(DictMixin): - """Blob Lease Properties. - - :ivar str status: - The lease status of the blob. Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. 
- """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """The content settings of a blob. - - :param str content_type: - The content type specified for the blob. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :param str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :param str content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class CopyProperties(DictMixin): - """Blob Copy Properties. - - These properties will be `None` if this blob has never been the destination - in a Copy Blob operation, or if this blob has been modified after a concluded - Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. 
Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar ~datetime.datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar ~datetime.datetime destination_snapshot: - Included if the blob is incremental copy blob or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this blob. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class BlobBlock(DictMixin): - """BlockBlob Block class. - - :param str block_id: - Block id. - :param str state: - Block state. Possible values: committed|uncommitted - :ivar int size: - Block size in bytes. - """ - - def __init__(self, block_id, state=BlockState.Latest): - self.id = block_id - self.state = state - self.size = None - - @classmethod - def _from_generated(cls, generated): - block = cls(decode_base64_to_text(generated.name)) - block.size = generated.size - return block - - -class PageRange(DictMixin): - """Page Range for page blob. - - :param int start: - Start of page range in bytes. - :param int end: - End of page range in bytes. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. 
- Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class ContainerSasPermissions(object): - """ContainerSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_container_sas` function and - for the AccessPolicies used with - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool list: - List blobs in the container. 
- """ - def __init__(self, read=False, write=False, delete=False, list=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ContainerSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, delete, - and list permissions. - :return: A ContainerSasPermissions object - :rtype: ~azure.storage.blob.ContainerSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - parsed = cls(p_read, p_write, p_delete, p_list) - parsed._str = permission # pylint: disable = protected-access - return parsed - - -class BlobSasPermissions(object): - """BlobSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_blob_sas` function. - - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - """ - def __init__(self, read=False, add=False, create=False, write=False, - delete=False): - self.read = read - self.add = add - self.create = create - self.write = write - self.delete = delete - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a BlobSasPermissions from a string. - - To specify read, add, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. - :return: A BlobSasPermissions object - :rtype: ~azure.storage.blob.BlobSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - - parsed = cls(p_read, p_add, p_create, p_write, p_delete) - parsed._str = permission # pylint: disable = protected-access - return parsed - - -class CustomerProvidedEncryptionKey(object): - """ - All data in Azure Storage is encrypted at-rest using an account-level encryption key. - In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents - and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. - - When you use a customer-provided key, Azure Storage does not manage or persist your key. 
- When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. - A SHA-256 hash of the encryption key is written alongside the blob contents, - and is used to verify that all subsequent operations against the blob use the same encryption key. - This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. - When reading a blob, the provided key is used to decrypt your data after reading it from disk. - In both cases, the provided encryption key is securely discarded - as soon as the encryption or decryption process completes. - - :param str key_value: - Base64-encoded AES-256 encryption key value. - :param str key_hash: - Base64-encoded SHA256 of the encryption key. - :ivar str algorithm: - Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - """ - def __init__(self, key_value, key_hash): - self.key_value = key_value - self.key_hash = key_hash - self.algorithm = 'AES256' - - -def service_stats_deserialize(generated): - """Deserialize a ServiceStats objects into a dict. - """ - return { - 'geo_replication': { - 'status': generated.geo_replication.status, - 'last_sync_time': generated.geo_replication.last_sync_time, - } - } - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. - """ - return { - 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'target_version': generated.default_service_version, # pylint: disable=protected-access - 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access - 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access - } diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_serialize.py b/azure/multiapi/storagev2/blob/v2019_02_02/_serialize.py deleted file mode 100644 index 2f21d8b..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_serialize.py +++ /dev/null @@ -1,57 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
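The ``CustomerProvidedEncryptionKey`` model removed above expects both the AES-256 key and its SHA-256 hash as Base64-encoded strings. Below is a minimal sketch of preparing those two values with the standard library; the ``azure.storage.blob`` import path is an assumption and should be adjusted to whichever blob package (or multi-API copy) is actually installed.

.. code-block:: python

    import base64
    import hashlib
    import os

    # Assumed import path -- the multi-API copies removed in this diff expose the same class.
    from azure.storage.blob import CustomerProvidedEncryptionKey

    raw_key = os.urandom(32)                        # 256-bit AES key
    key_value = base64.b64encode(raw_key).decode()  # Base64 of the key itself
    key_hash = base64.b64encode(                    # Base64 of SHA-256 over the *raw* key bytes
        hashlib.sha256(raw_key).digest()).decode()

    cpk = CustomerProvidedEncryptionKey(key_value=key_value, key_hash=key_hash)
    assert cpk.algorithm == 'AES256'                # fixed by the class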
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from azure.core import MatchConditions - -from ._generated.models import ModifiedAccessConditions, SourceModifiedAccessConditions - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if etag_param in kwargs: - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_modify_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None) - ) - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2019_02_02/_shared/base_client.py deleted file mode 100644 index 9ad754b..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/base_client.py +++ /dev/null @@ -1,411 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
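``_get_match_headers`` in the removed ``_serialize.py`` maps an ``azure.core.MatchConditions`` value plus an ``etag`` keyword onto the ``If-Match``/``If-None-Match`` pair carried by ``ModifiedAccessConditions``. The sketch below restates that mapping with a stand-in function (the original helper is private, and its error handling for a missing ``etag`` is omitted here).

.. code-block:: python

    from azure.core import MatchConditions

    def match_headers(match_condition=None, etag=None):
        """Stand-in for _get_match_headers: returns (if_match, if_none_match)."""
        if match_condition == MatchConditions.IfNotModified:  # succeed only while the etag still matches
            return etag, None
        if match_condition == MatchConditions.IfPresent:      # succeed only if the resource exists
            return '*', None
        if match_condition == MatchConditions.IfModified:     # succeed only once the etag has changed
            return None, etag
        if match_condition == MatchConditions.IfMissing:      # succeed only if the resource does not exist
            return None, '*'
        return None, None                                     # unconditional

    print(match_headers(MatchConditions.IfNotModified, etag='"0x8D0000000000000"'))  # ('"0x8D0000000000000"', None)
    print(match_headers(MatchConditions.IfMissing))                                   # (None, '*')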
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, - Optional, - Any, - Iterable, - Dict, - List, - Type, - Tuple, - TYPE_CHECKING, -) -import logging - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, DEFAULT_SOCKET_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .policies import ( - StorageHeadersPolicy, - StorageUserAgentPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - self.account_name = account[0] if len(account) > 1 else None - secondary_hostname = None - - self.credential = format_shared_key_credential(account, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". 
- - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - if "connection_timeout" not in kwargs: - kwargs["connection_timeout"] = DEFAULT_SOCKET_TIMEOUT - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self._client._config.version # pylint: disable=protected-access - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ] - ) - - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except StorageErrorException as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def format_shared_key_credential(account, credential): - if isinstance(credential, six.string_types): - if len(account) < 2: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account[0], "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = dict( # pylint: disable=consider-using-dict-comprehension - [s.split("=", 1) for s in conn_str.split(";")] - ) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} - except KeyError: - credential = conn_settings.get("SharedAccessSignature") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DefaultEndpointsProtocol"], - conn_settings["AccountName"], - service, - conn_settings["EndpointSuffix"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["AccountName"], service, conn_settings["EndpointSuffix"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) - ) - 
except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = StorageUserAgentPolicy(**kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/_shared/base_client_async.py deleted file mode 100644 index a64531d..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/base_client_async.py +++ /dev/null @@ -1,170 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
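``parse_connection_str`` in the removed ``base_client.py`` splits a standard storage connection string on ``;`` and ``=`` and derives the primary and secondary endpoints plus a shared-key credential. A self-contained sketch of the same derivation for the blob service, using placeholder account values rather than real credentials:

.. code-block:: python

    conn_str = (
        "DefaultEndpointsProtocol=https;"
        "AccountName=examplestorage;"        # placeholder account name
        "AccountKey=ZXhhbXBsZS1rZXk=;"       # placeholder key, not a real secret
        "EndpointSuffix=core.windows.net"
    )

    # Same split the helper uses: ';' between settings, one '=' per setting.
    settings = dict(s.split("=", 1) for s in conn_str.rstrip(";").split(";"))

    service = "blob"
    primary = "{}://{}.{}.{}".format(
        settings["DefaultEndpointsProtocol"], settings["AccountName"], service, settings["EndpointSuffix"])
    secondary = "{}-secondary.{}.{}".format(settings["AccountName"], service, settings["EndpointSuffix"])
    credential = {"account_name": settings["AccountName"], "account_key": settings["AccountKey"]}

    print(primary)    # https://examplestorage.blob.core.windows.net
    print(secondary)  # examplestorage-secondary.blob.core.windows.net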
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, DEFAULT_SOCKET_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - if 'connection_timeout' not in kwargs: - kwargs['connection_timeout'] = DEFAULT_SOCKET_TIMEOUT[0] # type: ignore - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self._client._config.version # pylint: disable=protected-access - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ] - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except StorageErrorException as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/constants.py b/azure/multiapi/storagev2/blob/v2019_02_02/_shared/constants.py deleted file mode 100644 index ff5732e..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/constants.py +++ /dev/null @@ -1,25 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -from .._generated.version import VERSION - - -X_MS_VERSION = VERSION - -# Socket timeout in seconds -DEFAULT_SOCKET_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - DEFAULT_SOCKET_TIMEOUT = (20, 2000) # type: ignore - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2019_02_02/_shared/request_handlers.py deleted file mode 100644 index 873c67d..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/request_handlers.py +++ /dev/null @@ -1,143 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. 
All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.configuration import Configuration -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. - """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - return fstat(fileno).st_size - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key)] = value - return headers diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2019_02_02/_shared_access_signature.py deleted file mode 100644 index 8c80c0a..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_shared_access_signature.py +++ /dev/null @@ -1,571 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from ._shared import sign_string, url_quote -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ - QueryStringConstants - -if TYPE_CHECKING: - from datetime import datetime - from . import ( - ResourceTypes, - AccountSasPermissions, - UserDelegationKey, - ContainerSasPermissions, - BlobSasPermissions - ) - -class BlobQueryStringConstants(object): - SIGNED_TIMESTAMP = 'snapshot' - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key=None, user_delegation_key=None): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: - Instead of an account key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling get_user_delegation_key on any Blob service object. 
- ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - self.user_delegation_key = user_delegation_key - - def generate_blob(self, container_name, blob_name, snapshot=None, permission=None, - expiry=None, start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the blob or one of its snapshots. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to grant permission. - :param BlobSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - resource_path = container_name + '/' + blob_name - - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('b' if snapshot is None else 'bs') - sas.add_timestamp(snapshot) - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, resource_path, - user_delegation_key=self.user_delegation_key) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. 
- :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, container_name, - user_delegation_key=self.user_delegation_key) - return sas.get_token() - - -class _BlobSharedAccessHelper(_SharedAccessHelper): - - def add_timestamp(self, timestamp): - self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) - - def get_value_to_append(self, query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): - # pylint: disable = no-member - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/blob/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. - string_to_sign = \ - (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource) - - if user_delegation_key is not None: - self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) - self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) - self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) - self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) - self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) - self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_TID) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION)) - else: - string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + - self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + - self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + - self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key if user_delegation_key is None else user_delegation_key.value, - 
string_to_sign)) - - def get_token(self): - # a conscious decision was made to exclude the timestamp in the generated token - # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp - exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] - return '&'.join(['{0}={1}'.format(n, url_quote(v)) - for n, v in self.query_dict.items() if v is not None and n not in exclude]) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the blob service. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.blob.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_sas_token] - :end-before: [END create_sas_token] - :language: python - :dedent: 8 - :caption: Generating a shared access signature. 
- """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(blob=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_container_sas( - account_name, # type: str - container_name, # type: str - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[ContainerSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a container. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. 
- For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 12 - :caption: Generating a sas token. - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_container( - container_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_blob_sas( - account_name, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[BlobSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a blob. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str blob_name: - The name of the blob. - :param str snapshot: - An optional blob snapshot ID. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. 
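As a hedged usage sketch for the container- and blob-scoped helpers defined here (``generate_container_sas`` above and ``generate_blob_sas`` whose definition continues below): the import path, names and key values are placeholders, not taken from the deleted code.

.. code-block:: python

    from datetime import datetime, timedelta

    # assumed public import path; the vendored equivalents sit under the versioned namespace
    from azure.storage.blob import (
        BlobClient, generate_container_sas, generate_blob_sas,
        ContainerSasPermissions, BlobSasPermissions)

    account_name = "myaccount"        # placeholder
    account_key = "<account-key>"     # placeholder
    expiry = datetime.utcnow() + timedelta(hours=1)

    # Container-scoped SAS signed with the account key.
    container_sas = generate_container_sas(
        account_name,
        "mycontainer",
        account_key=account_key,
        permission=ContainerSasPermissions(read=True, list=True),
        expiry=expiry,
    )

    # Blob-scoped SAS; a UserDelegationKey could be passed instead of account_key.
    blob_sas = generate_blob_sas(
        account_name,
        "mycontainer",
        "myblob.txt",
        account_key=account_key,
        permission=BlobSasPermissions(read=True),
        expiry=expiry,
    )

    blob = BlobClient(
        account_url="https://{}.blob.core.windows.net".format(account_name),
        container_name="mycontainer",
        blob_name="myblob.txt",
        credential=blob_sas,
    )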
- Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.BlobSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_blob( - container_name, - blob_name, - snapshot=snapshot, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2019_02_02/_upload_helpers.py deleted file mode 100644 index d684524..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_upload_helpers.py +++ /dev/null @@ -1,282 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceExistsError, ResourceModifiedError - -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from ._shared.models import StorageErrorCode -from ._shared.uploads import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from ._shared.encryption import generate_blob_encryption_data, encrypt_blob -from ._generated.models import ( - StorageErrorException, - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) -from ._models import BlobProperties, ContainerProperties - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -def _convert_mod_error(error): - message = error.message.replace( - "The condition specified using HTTP conditional header(s) is not met.", - "The specified blob already exists.") - message = message.replace("ConditionNotMet", "BlobAlreadyExists") - overwrite_error = ResourceExistsError( - message=message, - response=error.response, - error=error) - overwrite_error.error_code = StorageErrorCode.blob_already_exists - raise overwrite_error - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count < blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return client.upload( - data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content 
or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs - ) - else: - block_ids = upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - **kwargs) - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - response = client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs) - - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - try: - if overwrite: - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_version.py b/azure/multiapi/storagev2/blob/v2019_02_02/_version.py deleted file mode 100644 index 63dfa66..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -VERSION = "12.0.0" diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/aio/__init__.py b/azure/multiapi/storagev2/blob/v2019_02_02/aio/__init__.py deleted file mode 100644 index 6b49ff2..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/aio/__init__.py +++ /dev/null @@ -1,135 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os - -from .._models import BlobType -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._blob_client_async import BlobClient -from ._container_client_async import ContainerClient -from ._blob_service_client_async import BlobServiceClient -from ._lease_async import BlobLeaseClient - - -async def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. 
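A minimal, illustrative sketch of the async ``upload_blob_to_url`` convenience function documented above (its body follows below). The import path and SAS URL are assumptions; the vendored copy lives under the versioned ``aio`` namespace.

.. code-block:: python

    import asyncio

    # assumed public import path; vendored copy: azure.multiapi.storagev2.blob.<api-version>.aio
    from azure.storage.blob.aio import upload_blob_to_url

    async def main():
        sas_url = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas>"  # placeholder
        # Uploads the bytes as a block blob; overwrite=True replaces any existing data.
        result = await upload_blob_to_url(sas_url, b"hello, world", overwrite=True)
        print(result["etag"], result["last_modified"])

    asyncio.run(main())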
- :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -async def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = await client.download_blob(**kwargs) - await stream.readinto(handle) - - -async def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
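The companion ``download_blob_from_url`` helper accepts either a file path or a writable stream, as the docstring above describes. A hedged sketch, assuming the public import path and a placeholder SAS URL:

.. code-block:: python

    import asyncio

    # assumed public import path; vendored copy sits under the versioned aio namespace
    from azure.storage.blob.aio import download_blob_from_url

    async def main():
        sas_url = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas>"  # placeholder
        # Writes the blob contents to a local file; without overwrite=True an existing file raises ValueError.
        await download_blob_from_url(sas_url, "hello-local.txt", overwrite=True)

    asyncio.run(main())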
- :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - await _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - await _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobLeaseClient', - 'ExponentialRetry', - 'LinearRetry' -] diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/aio/_blob_client_async.py deleted file mode 100644 index e9c01c1..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_blob_client_async.py +++ /dev/null @@ -1,1811 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import get_page_ranges_result -from .._serialize import get_modify_conditions -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageErrorException, CpkInfo -from .._deserialize import deserialize_blob_properties -from .._blob_client import BlobClient as BlobClientBase -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from .._models import BlobType, BlobBlock -from .._lease import get_access_conditions -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.pipeline.policies import HTTPPolicy - from .._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobSasPermissions, - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - - -class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. 
This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobClient, self).__init__( - account_url, - container_name=container_name, - blob_name=blob_name, - snapshot=snapshot, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_account_information(self, **kwargs): # type: ignore - # type: (Optional[int]) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). 
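To illustrate constructing the async ``BlobClient`` described above and calling ``get_account_information`` (whose body follows below); the account URL and credential are placeholders and the import path is an assumption.

.. code-block:: python

    import asyncio

    # assumed import; vendored path would be azure.multiapi.storagev2.blob.<api-version>.aio
    from azure.storage.blob.aio import BlobClient

    async def main():
        client = BlobClient(
            account_url="https://myaccount.blob.core.windows.net",   # placeholder account
            container_name="mycontainer",
            blob_name="myblob.txt",
            credential="<account-key-or-sas>",                        # placeholder credential
        )
        async with client:
            info = await client.get_account_information()
            print(info["sku_name"], info["account_kind"])

    asyncio.run(main())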
- :rtype: dict(str, str) - """ - try: - return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob( - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. - Required if the blob has an active lease. - :paramtype: ~azure.storage.blob.aio.BlobLeaseClient - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 16 - :caption: Upload a blob to the container. - """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return await upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return await upload_page_blob(**options) - return await upload_append_blob(**options) - - @distributed_trace_async - async def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> Iterable[bytes] - """Downloads a blob to a stream with automatic chunking. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. 
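The ``upload_blob`` method shown above dispatches on ``blob_type`` to the block, page or append upload helpers. A minimal sketch of a block blob upload, with assumed import paths and placeholder credentials:

.. code-block:: python

    import asyncio

    from azure.storage.blob import BlobType           # assumed import paths
    from azure.storage.blob.aio import BlobClient

    async def main():
        client = BlobClient(
            account_url="https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            blob_name="greeting.txt",
            credential="<credential>",                 # placeholder
        )
        async with client:
            # Block blob is the default type; PageBlob and AppendBlob are also accepted.
            result = await client.upload_blob(
                b"hello, world",
                blob_type=BlobType.BlockBlob,
                metadata={"category": "sample"},
                overwrite=True,
            )
            print(result["etag"])

    asyncio.run(main())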
Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A iterable data generator (stream) - :rtype: ~azure.storage.blob.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 16 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - downloader = StorageStreamDownloader(**options) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_blob(self, delete_snapshots=False, **kwargs): - # type: (bool, Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. 
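``download_blob`` above returns a ``StorageStreamDownloader`` whose ``readinto`` streams the content in chunks, as the module's own ``_download_to_stream`` helper does. An illustrative sketch with placeholder values:

.. code-block:: python

    import asyncio

    from azure.storage.blob.aio import BlobClient      # assumed import path

    async def main():
        client = BlobClient(
            account_url="https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            blob_name="myblob.txt",
            credential="<credential>",                  # placeholder
        )
        async with client:
            # Download only the first kilobyte; offset must be set when length is provided.
            downloader = await client.download_blob(offset=0, length=1024)
            with open("first-kb.bin", "wb") as handle:
                await downloader.readinto(handle)

    asyncio.run(main())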
- :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 16 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - await self._client.blob.delete(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_blob(self, **kwargs): - # type: (Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 12 - :caption: Undeleting a blob. - """ - try: - await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_blob_properties(self, **kwargs): - # type: (Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
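A short sketch pairing the ``delete_blob`` and ``undelete_blob`` operations shown above; this is illustrative only, with assumed import path and placeholder values, and restore succeeds only while the soft-delete retention window is open.

.. code-block:: python

    import asyncio

    from azure.storage.blob.aio import BlobClient      # assumed import path

    async def main():
        client = BlobClient(
            account_url="https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            blob_name="myblob.txt",
            credential="<credential>",                  # placeholder
        )
        async with client:
            # Soft-delete the blob together with its snapshots ...
            await client.delete_blob(delete_snapshots="include")
            # ... then restore it while the delete retention policy still retains it.
            await client.undelete_blob()

    asyncio.run(main())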
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 12 - :caption: Getting the properties for a blob. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - blob_props = await self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - blob_props.name = self.blob_name - blob_props.container = self.container_name - return blob_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return await self._client.blob.set_http_headers(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
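To illustrate ``set_http_headers`` above and ``set_blob_metadata`` (whose body follows below): both replace the full set of properties or metadata in one call. Import paths and values are assumptions.

.. code-block:: python

    import asyncio

    from azure.storage.blob import ContentSettings     # assumed import paths
    from azure.storage.blob.aio import BlobClient

    async def main():
        client = BlobClient(
            account_url="https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            blob_name="report.csv",
            credential="<credential>",                  # placeholder
        )
        async with client:
            # Setting any content_settings field overrides all system properties ...
            await client.set_http_headers(ContentSettings(
                content_type="text/csv", cache_control="max-age=3600"))
            # ... and each metadata call replaces all existing metadata.
            await client.set_blob_metadata({"department": "finance"})

    asyncio.run(main())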
- :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.set_metadata(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return await self._client.page_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.append_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. 
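A hedged sketch combining ``create_page_blob`` above with ``create_snapshot`` (whose body follows below); the account, blob names and credential are placeholders and the import path is an assumption.

.. code-block:: python

    import asyncio

    from azure.storage.blob.aio import BlobClient      # assumed import path

    async def main():
        page_blob = BlobClient(
            account_url="https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            blob_name="disk.vhd",
            credential="<credential>",                  # placeholder
        )
        async with page_blob:
            # Page blob sizes must be aligned to a 512-byte boundary.
            await page_blob.create_page_blob(1024 * 1024)
            # The snapshot is identified by the returned 'snapshot' timestamp.
            snap = await page_blob.create_snapshot(metadata={"label": "baseline"})
            print(snap["snapshot"])

    asyncio.run(main())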
- - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 12 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.create_snapshot(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, Any) -> Any - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. 
When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. 
- If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Copy a blob from a URL. - """ - options = self._start_copy_from_url_options( - source_url, - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return await self._client.page_blob.copy_incremental(**options) - return await self._client.blob.start_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of BlobProperties. 
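A minimal sketch of starting and inspecting a copy with the call above (placeholder URL; `blob_client` is an assumed async `BlobClient`):

    copy = await blob_client.start_copy_from_url(
        "https://myaccount.blob.core.windows.net/mycontainer/source-blob")
    # The returned dict carries the copy properties described above.
    print(copy["copy_id"], copy["copy_status"])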
- :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - await self._client.blob.abort_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. 
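A rough sketch of taking and releasing a lease via `acquire_lease` (same hypothetical `blob_client`):

    lease = await blob_client.acquire_lease()  # infinite lease by default
    try:
        await blob_client.set_blob_metadata({"owner": "maintenance"}, lease=lease)
    finally:
        await lease.release()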
The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=standard_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
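For example, re-tiering a block blob with the call documented above might look like this (hypothetical client name; pass a lease if one is active):

    await blob_client.set_standard_blob_tier("Cool")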
- :rtype: None - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - await self._client.block_blob.stage_block(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - options = self._stage_block_from_url_options( - block_id, - source_url, - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - await self._client.block_blob.stage_block_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
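A small sketch of staging a block from a source URL (placeholder URL and block id; assumes `blob_client` as above):

    import base64

    block_id = base64.b64encode(b"block-000").decode()
    await blob_client.stage_block_from_url(
        block_id,
        source_url="https://myaccount.blob.core.windows.net/src/source-blob",
        source_offset=0,
        source_length=4 * 1024 * 1024)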
- :returns: A tuple of two lists - committed and uncommitted blocks
- :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
- """
- access_conditions = get_access_conditions(kwargs.pop('lease', None))
- try:
- blocks = await self._client.block_blob.get_block_list(
- list_type=block_list_type,
- snapshot=self.snapshot,
- timeout=kwargs.pop('timeout', None),
- lease_access_conditions=access_conditions,
- **kwargs)
- except StorageErrorException as error:
- process_storage_error(error)
- return self._get_block_list_result(blocks)
-
- @distributed_trace_async
- async def commit_block_list( # type: ignore
- self, block_list, # type: List[BlobBlock]
- content_settings=None, # type: Optional[ContentSettings]
- metadata=None, # type: Optional[Dict[str, str]]
- **kwargs
- ):
- # type: (...) -> Dict[str, Union[str, datetime]]
- """The Commit Block List operation writes a blob by specifying the list of
- block IDs that make up the blob.
-
- :param list block_list:
- List of BlobBlock objects.
- :param ~azure.storage.blob.ContentSettings content_settings:
- ContentSettings object used to set blob properties. Used to set content type, encoding,
- language, disposition, md5, and cache control.
- :param metadata:
- Name-value pairs associated with the blob as metadata.
- :type metadata: dict[str, str]
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
- :keyword bool validate_content:
- If true, calculates an MD5 hash of the page content. The storage
- service checks the hash of the content that has arrived
- with the hash that was sent. This is primarily valuable for detecting
- bitflips on the wire if using http instead of https, as https (the default),
- will already validate. Note that this MD5 hash is not stored with the
- blob.
- :keyword ~datetime.datetime if_modified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only
- if the resource has been modified since the specified time.
- :keyword ~datetime.datetime if_unmodified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only if
- the resource has not been modified since the specified date/time.
- :keyword str etag:
- An ETag value, or the wildcard character (*). Used to check if the resource has changed,
- and act according to the condition specified by the `match_condition` parameter.
- :keyword ~azure.core.MatchConditions match_condition:
- The match condition to use upon the etag.
- :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
- A standard blob tier value to set the blob to. For this version of the library,
- this is only applicable to block blobs on standard storage accounts.
- :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
- Encrypts the data on the service-side with the given key.
- Use of customer-provided keys must be done over HTTPS.
- As the encryption key itself is provided in the request,
- a secure connection must be established to transfer the key.
- :keyword int timeout:
- The timeout parameter is expressed in seconds.
- :returns: Blob-updated property dict (Etag and last modified).
- :rtype: dict(str, Any)
- """
- options = self._commit_block_list_options(
- block_list,
- content_settings=content_settings,
- metadata=metadata,
- **kwargs)
- try:
- return await self._client.block_blob.commit_block_list(**options) # type: ignore
- except StorageErrorException as error:
- process_storage_error(error)
-
- @distributed_trace_async
- async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs):
- # type: (Union[str, PremiumPageBlobTier], **Any) -> None
- """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
-
- :param premium_page_blob_tier:
- A page blob tier value to set the blob to. The tier correlates to the size of the
- blob and number of allowed IOPS. This is only applicable to page blobs on
- premium storage accounts.
- :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
- :keyword int timeout:
- The timeout parameter is expressed in seconds. This method may make
- multiple calls to the Azure service and the timeout will apply to
- each call individually.
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
- :rtype: None
- """
- access_conditions = get_access_conditions(kwargs.pop('lease', None))
- if premium_page_blob_tier is None:
- raise ValueError("A PremiumPageBlobTier must be specified")
- try:
- await self._client.blob.set_tier(
- tier=premium_page_blob_tier,
- timeout=kwargs.pop('timeout', None),
- lease_access_conditions=access_conditions,
- **kwargs)
- except StorageErrorException as error:
- process_storage_error(error)
-
- @distributed_trace_async
- async def get_page_ranges( # type: ignore
- self, offset=None, # type: Optional[int]
- length=None, # type: Optional[int]
- previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
- **kwargs
- ):
- # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
- """Returns the list of valid page ranges for a Page Blob or snapshot
- of a page blob.
-
- :param int offset:
- Start of byte range to use for getting valid page ranges.
- If no length is given, all bytes after the offset will be searched.
- Pages must be aligned with 512-byte boundaries, the start offset
- must be a modulus of 512 and the length must be a modulus of
- 512.
- :param int length:
- Number of bytes to use for getting valid page ranges.
- If length is given, offset must be provided.
- This range will return valid page ranges from the offset start up to
- the specified length.
- Pages must be aligned with 512-byte boundaries, the start offset
- must be a modulus of 512 and the length must be a modulus of
- 512.
- :param str previous_snapshot_diff:
- The snapshot diff parameter that contains an opaque DateTime value that
- specifies a previous blob snapshot to be compared
- against a more recent snapshot or the current blob.
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
- :keyword ~datetime.datetime if_modified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
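Putting `stage_block` and `commit_block_list` together, a minimal end-to-end sketch (the `BlobBlock` import is shown from `azure.storage.blob` for readability; this vendored package exposes the same model under its versioned path):

    import base64
    from azure.storage.blob import BlobBlock

    # Stage two blocks, then commit them in order to form the final block blob.
    ids = [base64.b64encode(f"block-{i:03d}".encode()).decode() for i in range(2)]
    await blob_client.stage_block(ids[0], b"hello ")
    await blob_client.stage_block(ids[1], b"world")
    await blob_client.commit_block_list([BlobBlock(block_id=b) for b in ids])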
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only
- if the resource has been modified since the specified time.
- :keyword ~datetime.datetime if_unmodified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only if
- the resource has not been modified since the specified date/time.
- :keyword str etag:
- An ETag value, or the wildcard character (*). Used to check if the resource has changed,
- and act according to the condition specified by the `match_condition` parameter.
- :keyword ~azure.core.MatchConditions match_condition:
- The match condition to use upon the etag.
- :keyword int timeout:
- The timeout parameter is expressed in seconds.
- :returns:
- A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
- The first element is the filled page ranges, the second element is the cleared page ranges.
- :rtype: tuple(list(dict(str, str)), list(dict(str, str)))
- """
- options = self._get_page_ranges_options(
- offset=offset,
- length=length,
- previous_snapshot_diff=previous_snapshot_diff,
- **kwargs)
- try:
- if previous_snapshot_diff:
- ranges = await self._client.page_blob.get_page_ranges_diff(**options)
- else:
- ranges = await self._client.page_blob.get_page_ranges(**options)
- except StorageErrorException as error:
- process_storage_error(error)
- return get_page_ranges_result(ranges)
-
- @distributed_trace_async
- async def set_sequence_number( # type: ignore
- self, sequence_number_action, # type: Union[str, SequenceNumberAction]
- sequence_number=None, # type: Optional[str]
- **kwargs
- ):
- # type: (...) -> Dict[str, Union[str, datetime]]
- """Sets the blob sequence number.
-
- :param str sequence_number_action:
- This property indicates how the service should modify the blob's sequence
- number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
- :param str sequence_number:
- This property sets the blob's sequence number. The sequence number is a
- user-controlled property that you can use to track requests and manage
- concurrency issues.
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
- :keyword ~datetime.datetime if_modified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only
- if the resource has been modified since the specified time.
- :keyword ~datetime.datetime if_unmodified_since:
- A DateTime value. Azure expects the date value passed in to be UTC.
- If timezone is included, any non-UTC datetimes will be converted to UTC.
- If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only if
- the resource has not been modified since the specified date/time.
- :keyword str etag:
- An ETag value, or the wildcard character (*). Used to check if the resource has changed,
- and act according to the condition specified by the `match_condition` parameter.
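A short sketch of reading the ranges returned by `get_page_ranges` (same assumed `blob_client`):

    filled, cleared = await blob_client.get_page_ranges()
    for page_range in filled:
        print(page_range["start"], page_range["end"])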
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return await self._client.page_blob.update_sequence_number(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_blob(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return await self._client.page_blob.resize(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. 
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return await self._client.page_blob.upload_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. 
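For example, resizing a page blob and then writing one aligned page with `upload_page` might look like this (illustrative values only):

    await blob_client.resize_blob(1024 * 1024)  # 1 MiB, 512-byte aligned
    await blob_client.upload_page(page=b"\xff" * 512, offset=0, length=512)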
- :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - - options = self._upload_pages_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def clear_page(self, offset, length, **kwargs): - # type: (int, int, Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
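A minimal sketch of the URL-sourced variant above (placeholder source URL; offsets and length follow the 512-byte alignment rules):

    await blob_client.upload_pages_from_url(
        source_url="https://myaccount.blob.core.windows.net/src/source-page-blob",
        offset=0,
        length=512,
        source_offset=0)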
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return await self._client.page_blob.clear_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
- Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return await self._client.append_blob.append_block(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async() - async def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
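For illustration, appending data with `append_block` (hypothetical `blob_client`; the blob must already be an append blob):

    await blob_client.append_block(b"first line\n")
    await blob_client.append_block("second line\n", encoding="utf-8")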
- :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._append_block_from_url_options( - copy_source_url, - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return await self._client.append_blob.append_block_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/aio/_blob_service_client_async.py deleted file mode 100644 index f5a666d..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_blob_service_client_async.py +++ /dev/null @@ -1,558 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged - -from .._shared.models import LocationMode -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._shared.parser import _to_utc_datetime -from .._shared.response_handlers import parse_to_internal_user_delegation_key -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo -from .._blob_service_client import BlobServiceClient as BlobServiceClientBase -from ._container_client_async import ContainerClient -from ._blob_client_async import BlobClient -from .._models import ( - ContainerProperties, - service_stats_deserialize, - service_properties_deserialize, -) -from ._models import ContainerPropertiesPaged - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.pipeline.transport import HttpTransport - from azure.core.pipeline.policies import HTTPPolicy - from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey - from ._lease_async import BlobLeaseClient - from .._models import ( - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. 
- :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 12 - :caption: Getting account information for the blob service. 
- """ - try: - return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_stats(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 12 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 12 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) 
-> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 12 - :caption: Setting service properties for the blob service. - """ - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> AsyncItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 16 - :caption: Listing the containers in the blob service. - """ - include = 'metadata' if include_metadata else None - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace_async - async def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 16 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - timeout = kwargs.pop('timeout', None) - kwargs.setdefault('merge_span', True) - await container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace_async - async def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 16 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 12 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. 
This can either be the ID of the snapshot, - or a dictionary output returned by - :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 16 - :caption: Getting the blob client to interact with a specific blob. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/aio/_container_client_async.py deleted file mode 100644 index 75d611a..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_container_client_async.py +++ /dev/null @@ -1,1012 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
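For orientation while reviewing the removal of ``_blob_service_client_async.py`` above, here is a minimal, illustrative sketch of how the async ``BlobServiceClient`` it defined was typically driven. The import path mirrors the multiapi package layout shown in this diff, and the account URL, SAS placeholder, and container name are assumptions made for the example, not values taken from the source::

    # Illustrative usage sketch only -- the import path assumes the
    # azure.multiapi.storagev2.blob.v2019_02_02.aio layout shown in this diff;
    # the account URL, SAS token and container name are hypothetical.
    import asyncio
    from azure.multiapi.storagev2.blob.v2019_02_02.aio import BlobServiceClient

    async def main():
        # A SAS token embedded in the URL is sufficient; otherwise pass credential=.
        service = BlobServiceClient("https://myaccount.blob.core.windows.net?<sas-token>")
        # list_containers returns an AsyncItemPaged that lazily follows continuation tokens.
        async for container in service.list_containers(include_metadata=True):
            print(container.name)
        # create_container returns a ContainerClient bound to the newly created container.
        container_client = await service.create_container("example-container")
        print(await container_client.get_container_properties())

    asyncio.run(main())
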
-# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, AsyncIterator, - TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.pipeline.transport import HttpRequest, AsyncHttpResponse - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from .._generated.aio import AzureBlobStorage -from .._generated.models import ( - StorageErrorException, - SignedIdentifier) -from .._deserialize import deserialize_container_properties -from .._serialize import get_modify_conditions -from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name -from .._lease import get_access_conditions -from .._models import ContainerProperties, BlobProperties, BlobType # pylint: disable=unused-import -from ._models import BlobPropertiesPaged, BlobPrefix -from ._lease_async import BlobLeaseClient -from ._blob_client_async import BlobClient - -if TYPE_CHECKING: - from azure.core.pipeline.transport import HttpTransport - from azure.core.pipeline.policies import HTTPPolicy - from .._models import ContainerSasPermissions, PublicAccess - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - AccessPolicy, - ContentSettings, - StandardBlobTier, - PremiumPageBlobTier) - - -class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. 
- :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 12 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(ContainerClient, self).__init__( - account_url, - container_name=container_name, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 16 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - timeout = kwargs.pop('timeout', None) - try: - return await self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 16 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - await self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_properties(self, **kwargs): - # type: (**Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace_async - async def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. 
Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 16 - :caption: Getting the access policy on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs # type: Any - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. 
- - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 16 - :caption: Setting access policy on the container. - """ - timeout = kwargs.pop('timeout', None) - lease = kwargs.pop('lease', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - try: - return await self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Any], **Any) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 12 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged - ) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. - :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace_async - async def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. 
The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. 
- :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 12 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - await blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace_async - async def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword lease: - Required if the blob has an active lease. Value can be a Lease object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await blob.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def delete_blobs( # pylint: disable=arguments-differ - self, *blobs: Union[str, BlobProperties], - delete_snapshots: Optional[str] = None, - lease: Optional[Union[str, BlobLeaseClient]] = None, - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: The blob names with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - :type blobs: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :param lease: - Required if a blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. 
When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 12 - :caption: Deleting multiple blobs. - """ - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - timeout = kwargs.pop('timeout', None) - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - delete_snapshots=delete_snapshots, - lease=lease, - timeout=timeout, - **kwargs - ) - options.update({'raise_on_any_failure': raise_on_any_failure}) - query_parameters, header_parameters = self._generate_delete_blobs_options(**options) - # To pass kwargs to "_batch_send", we need to remove anything that was - # in the Autorest signature for Autorest, otherwise transport will be upset - for possible_param in ['timeout', 'delete_snapshots', 'lease_access_conditions', 'modified_access_conditions']: - options.pop(possible_param, None) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - req = HttpRequest( - "DELETE", - "/{}/{}".format(self.container_name, blob_name), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_standard_blob_tier_blobs( - self, - standard_blob_tier: Union[str, 'StandardBlobTier'], - *blobs: Union[str, BlobProperties], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - :type blobs: str or ~azure.storage.blob.BlobProperties - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. 
For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - - query_parameters, header_parameters = self._generate_set_tier_options( - tier=standard_blob_tier, - lease_access_conditions=access_conditions, - **kwargs - ) - # To pass kwargs to "_batch_send", we need to remove anything that was - # in the Autorest signature for Autorest, otherwise transport will be upset - for possible_param in ['timeout', 'lease']: - kwargs.pop(possible_param, None) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - req = HttpRequest( - "PUT", - "/{}/{}".format(self.container_name, blob_name), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return await self._batch_send(*reqs, **kwargs) - - @distributed_trace - async def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], - *blobs: Union[str, BlobProperties], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - :type blobs: str or ~azure.storage.blob.BlobProperties - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. 
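A companion sketch for the batch tiering operation described above, again against the public azure-storage-blob aio surface; the tier choice, connection string and blob names are placeholder assumptions.

import asyncio
from azure.storage.blob import StandardBlobTier
from azure.storage.blob.aio import ContainerClient

async def archive_blobs(conn_str: str) -> None:
    container = ContainerClient.from_connection_string(conn_str, container_name="demo")
    async with container:
        # Set the access tier on several block blobs in one batch request.
        await container.set_standard_blob_tier_blobs(StandardBlobTier.Archive, "a.txt", "b.txt")

if __name__ == "__main__":
    asyncio.run(archive_blobs("<connection-string>"))  # placeholder connection string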
- :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - - query_parameters, header_parameters = self._generate_set_tier_options( - tier=premium_page_blob_tier, - lease_access_conditions=access_conditions, - **kwargs - ) - # To pass kwargs to "_batch_send", we need to remove anything that was - # in the Autorest signature for Autorest, otherwise transport will be upset - for possible_param in ['timeout', 'lease']: - kwargs.pop(possible_param, None) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - req = HttpRequest( - "PUT", - "/{}/{}".format(self.container_name, blob_name), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return await self._batch_send(*reqs, **kwargs) - - def get_blob_client( - self, blob, # type: Union[BlobProperties, str] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 12 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/aio/_download_async.py deleted file mode 100644 index ea83862..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_download_async.py +++ /dev/null @@ -1,490 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from azure.core.exceptions import HttpResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._deserialize import get_page_ranges_result -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = response.properties.etag - - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - try: - chunk = next(self._iter_chunks) - except StopIteration: - raise StopAsyncIteration("Download complete") - self._current_content = await self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the blob.
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = await self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overridden by the user by specifying '*' - if self._request_options.get('modified_access_conditions'): - if not self._request_options['modified_access_conditions'].if_match: - self._request_options['modified_access_conditions'].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - """Iterate over chunks in the download stream. - - :rtype: Iterable[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - async def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download.
- :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Text encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob.
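The readall, readinto and chunks members documented above are the supported download paths (content_as_bytes, content_as_text and download_to_stream are deprecated aliases). A minimal sketch against the public azure-storage-blob aio client that this vendored module mirrors; the connection string, names and local file path are placeholder assumptions.

import asyncio
from azure.storage.blob.aio import BlobClient

async def download_examples(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(conn_str, container_name="demo", blob_name="big.bin")
    async with blob:
        # 1) Whole payload in memory.
        downloader = await blob.download_blob()
        data = await downloader.readall()
        # 2) Stream into an open file handle (a fresh downloader, since the first was consumed).
        downloader = await blob.download_blob()
        with open("big.bin", "wb") as handle:
            await downloader.readinto(handle)
        # 3) Chunk-by-chunk iteration.
        downloader = await blob.download_blob()
        total = 0
        async for chunk in downloader.chunks():
            total += len(chunk)
        assert total == len(data)

if __name__ == "__main__":
    asyncio.run(download_examples("<connection-string>"))  # placeholder connection string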
- :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2019_02_02/aio/_lease_async.py deleted file mode 100644 index ecd9076..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_lease_async.py +++ /dev/null @@ -1,296 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._generated.models import ( - StorageErrorException, - LeaseAccessConditions) -from .._serialize import get_modify_conditions -from .._lease import BlobLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - from .._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(LeaseClientBase): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.aio.BlobClient or - ~azure.storage.blob.aio.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = kwargs.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
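A minimal lease lifecycle sketch for the acquire, renew and release operations documented in this class, written against the public azure-storage-blob aio client; the connection string, names and 15-second duration are placeholder assumptions.

import asyncio
from azure.storage.blob.aio import BlobClient

async def lease_demo(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(conn_str, container_name="demo", blob_name="a.txt")
    async with blob:
        # Acquire a short lease, renew it once, then release it so other
        # clients can write to the blob again.
        lease = await blob.acquire_lease(lease_duration=15)
        try:
            await lease.renew()
        finally:
            await lease.release()

if __name__ == "__main__":
    asyncio.run(lease_demo("<connection-string>"))  # placeholder connection string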
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
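A companion sketch for the change and break_lease operations documented above, under the same placeholder assumptions (connection string, names, durations).

import asyncio
import uuid
from azure.storage.blob.aio import BlobClient

async def break_demo(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(conn_str, container_name="demo", blob_name="a.txt")
    async with blob:
        lease = await blob.acquire_lease(lease_duration=60)
        # Swap in a new, caller-chosen lease ID, then start breaking the lease.
        await lease.change(proposed_lease_id=str(uuid.uuid4()))
        remaining = await lease.break_lease(lease_break_period=5)
        print("lease fully breaks in", remaining, "seconds")

if __name__ == "__main__":
    asyncio.run(break_demo("<connection-string>"))  # placeholder connection string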
- :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_models.py b/azure/multiapi/storagev2/blob/v2019_02_02/aio/_models.py deleted file mode 100644 index 3128022..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_models.py +++ /dev/null @@ -1,226 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from typing import List, Any, TYPE_CHECKING # pylint: disable=unused-import - -from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged - -from .._models import BlobProperties, ContainerProperties -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error -from .._shared.models import DictMixin - -from .._generated.models import StorageErrorException -from .._generated.models import BlobPrefix as GenBlobPrefix -from .._generated.models import BlobItem - - -class ContainerPropertiesPaged(AsyncPageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class BlobPropertiesPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The container that the blobs are listed from. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItem): - blob = BlobProperties._from_generated(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefix(AsyncItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token of the current page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - async def _extract_data_cb(self, get_next_return): - continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2019_02_02/aio/_upload_helpers.py deleted file mode 100644 index b936ee6..0000000 --- a/azure/multiapi/storagev2/blob/v2019_02_02/aio/_upload_helpers.py +++ /dev/null @@ -1,256 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceModifiedError - -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from .._shared.uploads_async import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from .._shared.encryption import generate_blob_encryption_data, encrypt_blob -from .._generated.models import ( - StorageErrorException, - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) -from .._upload_helpers import _convert_mod_error, _any_conditions - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - - -async def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count < blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return await client.upload( - data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = await upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs - ) - else: - block_ids = await upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - 
max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return await client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - **kwargs) - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. " - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - response = await client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return await upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs) - - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - try: - if overwrite: - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to 
rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/__init__.py deleted file mode 100644 index caa0040..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/__init__.py +++ /dev/null @@ -1,225 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import os - -from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import -from ._version import VERSION -from ._blob_client import BlobClient -from ._container_client import ContainerClient -from ._blob_service_client import BlobServiceClient -from ._lease import BlobLeaseClient -from ._download import StorageStreamDownloader -from ._quick_query_helper import BlobQueryReader -from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.response_handlers import PartialBatchErrorException -from ._shared.models import( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode, - UserDelegationKey -) -from ._generated.models import ( - RehydratePriority -) -from ._models import ( - BlobType, - BlockState, - StandardBlobTier, - PremiumPageBlobTier, - SequenceNumberAction, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - ContainerProperties, - BlobProperties, - FilteredBlob, - LeaseProperties, - ContentSettings, - CopyProperties, - BlobBlock, - PageRange, - AccessPolicy, - ContainerSasPermissions, - BlobSasPermissions, - CustomerProvidedEncryptionKey, - ContainerEncryptionScope, - BlobQueryError, - DelimitedJsonDialect, - DelimitedTextDialect, - ObjectReplicationPolicy, - ObjectReplicationRule -) -from ._list_blobs_helper import BlobPrefix - -__version__ = VERSION - - -def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> Dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. 
- If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to upload. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = client.download_blob(**kwargs) - stream.readinto(handle) - - -def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream. - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent.
This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobType', - 'BlobLeaseClient', - 'StorageErrorCode', - 'UserDelegationKey', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'BlockState', - 'StandardBlobTier', - 'PremiumPageBlobTier', - 'SequenceNumberAction', - 'PublicAccess', - 'BlobAnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'ContainerProperties', - 'BlobProperties', - 'BlobPrefix', - 'FilteredBlob', - 'LeaseProperties', - 'ContentSettings', - 'CopyProperties', - 'BlobBlock', - 'PageRange', - 'AccessPolicy', - 'ContainerSasPermissions', - 'BlobSasPermissions', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageStreamDownloader', - 'CustomerProvidedEncryptionKey', - 'RehydratePriority', - 'generate_account_sas', - 'generate_container_sas', - 'generate_blob_sas', - 'PartialBatchErrorException', - 'ContainerEncryptionScope', - 'BlobQueryError', - 'DelimitedJsonDialect', - 'DelimitedTextDialect', - 'BlobQueryReader', - 'ObjectReplicationPolicy', - 'ObjectReplicationRule' -] diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_blob_client.py b/azure/multiapi/storagev2/blob/v2019_12_12/_blob_client.py deleted file mode 100644 index b536605..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_blob_client.py +++ /dev/null @@ -1,3535 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
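[Editor's note] The two module-level helpers above (upload_blob_to_url and download_blob_from_url) wrap a temporary BlobClient for one-shot transfers. A minimal sketch, assuming the package shown here (azure.multiapi.storagev2.blob.v2019_12_12) or an equivalent azure.storage.blob release is installed; the URL, SAS token, and file names are placeholders::

    from azure.multiapi.storagev2.blob.v2019_12_12 import (
        upload_blob_to_url,
        download_blob_from_url,
    )

    # Any blob URL carrying a SAS token works; no explicit credential is needed then.
    sas_url = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas-token>"

    # Upload bytes as a block blob, replacing any existing content.
    upload_blob_to_url(sas_url, b"hello world", overwrite=True)

    # Download the same blob to a local file; overwrite=True allows replacing an existing file.
    download_blob_from_url(sas_url, "hello_copy.txt", overwrite=True)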
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines,no-self-use - -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.tracing.decorator import distributed_trace - -from ._shared import encode_base64 -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.encryption import generate_blob_encryption_data -from ._shared.uploads import IterStreamer -from ._shared.request_handlers import ( - add_metadata_headers, get_length, read_length, - validate_and_format_range_headers) -from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized -from ._generated import AzureBlobStorage, VERSION -from ._generated.models import ( # pylint: disable=unused-import - DeleteSnapshotsOptionType, - BlobHTTPHeaders, - BlockLookupList, - AppendPositionAccessConditions, - SequenceNumberAccessConditions, - StorageErrorException, - QueryRequest, - CpkInfo) -from ._serialize import ( - get_modify_conditions, - get_source_conditions, - get_cpk_scope_info, - get_api_version, - serialize_blob_tags_header, - serialize_blob_tags, - serialize_query_format -) -from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags -from ._quick_query_helper import BlobQueryReader -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError -from ._download import StorageStreamDownloader -from ._lease import BlobLeaseClient, get_access_conditions - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.models import BlockList - from ._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. 
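[Editor's note] The constructor parameters described in the class docstring above (account_url, container_name, blob_name, credential) are enough to build a client directly; a minimal sketch with a placeholder account and key, where the tuning keywords such as max_block_size are optional::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

    # Placeholder account name and shared key; a SAS token or azure.identity credential also works.
    blob_client = BlobClient(
        account_url="https://myaccount.blob.core.windows.net",
        container_name="mycontainer",
        blob_name="docs/report.txt",
        credential="<account-key>",
        max_block_size=4 * 1024 * 1024,  # optional performance tuning keyword
    )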
Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - if not (container_name and blob_name): - raise ValueError("Please specify a container name and blob name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - path_snapshot, sas_token = parse_query(parsed_url.query) - - self.container_name = container_name - self.blob_name = blob_name - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) - super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - quote(self.blob_name, safe='~/'), - self._query_str) - - @classmethod - def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): - # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient - """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name. - - :param str blob_url: - The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type blob_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. If specified, this will override - the snapshot in the url. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - """ - try: - if not blob_url.lower().startswith('http'): - blob_url = "https://" + blob_url - except AttributeError: - raise ValueError("Blob URL must be a string.") - parsed_url = urlparse(blob_url.rstrip('/')) - - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(blob_url)) - - account_path = "" - if ".core." in parsed_url.netloc: - # .core. is indicating non-customized url. Blob name with directory info can also be parsed. - path_blob = parsed_url.path.lstrip('/').split('/', 1) - elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: - path_blob = parsed_url.path.lstrip('/').split('/', 2) - account_path += path_blob[0] - else: - # for customized url. blob name that has directory info cannot be parsed. 
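[Editor's note] As the comment above notes, from_blob_url handles *.core.* endpoints, local emulator addresses, and custom domains differently, and only the first two preserve '/' in blob names. A sketch with hypothetical URLs::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

    # Standard endpoint: directory-style blob names ("logs/2020/01.log") parse correctly.
    client = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/logs/2020/01.log?<sas-token>")
    print(client.container_name, client.blob_name)  # mycontainer logs/2020/01.log

    # Custom domain: the last two path segments become container and blob name,
    # so blob names containing '/' cannot be recovered from such a URL.
    client = BlobClient.from_blob_url(
        "https://cdn.example.com/mycontainer/report.txt?<sas-token>")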
- path_blob = parsed_url.path.lstrip('/').split('/') - if len(path_blob) > 2: - account_path = "/" + "/".join(path_blob[:-2]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) - if not container_name or not blob_name: - raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.") - - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=path_snapshot, credential=credential, **kwargs - ) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobClient - """Create BlobClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_blob] - :end-before: [END auth_from_connection_string_blob] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=snapshot, credential=credential, **kwargs - ) - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). 
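[Editor's note] from_connection_string, shown above, pulls the endpoint and shared key out of a standard connection string, and get_account_information then needs only account-level (or SAS) access. A sketch with a placeholder connection string::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

    conn_str = (
        "DefaultEndpointsProtocol=https;AccountName=myaccount;"
        "AccountKey=<account-key>;EndpointSuffix=core.windows.net"
    )
    blob_client = BlobClient.from_connection_string(
        conn_str, container_name="mycontainer", blob_name="report.txt")

    # Returns a dict containing at least 'sku_name' and 'account_kind'.
    info = blob_client.get_account_information()
    print(info["sku_name"], info["account_kind"])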
- :rtype: dict(str, str) - """ - try: - return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_blob_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - encryption_options = { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function, - } - if self.key_encryption_key is not None: - cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) - encryption_options['cek'] = cek - encryption_options['vector'] = iv - encryption_options['data'] = encryption_data - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - overwrite = kwargs.pop('overwrite', False) - max_concurrency = kwargs.pop('max_concurrency', 1) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - kwargs['cpk_info'] = cpk_info - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) - kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) - if content_settings: - kwargs['blob_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['overwrite'] = overwrite - kwargs['headers'] = headers - kwargs['validate_content'] = validate_content - kwargs['blob_settings'] = self._config - kwargs['max_concurrency'] = max_concurrency - kwargs['encryption_options'] = encryption_options - if blob_type == BlobType.BlockBlob: - kwargs['client'] = self._client.block_blob - kwargs['data'] = data - elif blob_type == BlobType.PageBlob: - kwargs['client'] = self._client.page_blob - elif blob_type == BlobType.AppendBlob: - if self.require_encryption or (self.key_encryption_key is not None): - raise 
ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - kwargs['client'] = self._client.append_blob - else: - raise ValueError("Unsupported BlobType: {}".format(blob_type)) - return kwargs - - @distributed_trace - def upload_blob( # pylint: disable=too-many-locals - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 12 - :caption: Upload a blob to the container. 
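[Editor's note] Putting the upload_blob keywords described above together, a minimal sketch that uploads a local file as a block blob with explicit content settings; the connection string and file name are placeholders::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient, BlobType, ContentSettings

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="report.txt")

    with open("report.txt", "rb") as data:
        # overwrite=True replaces any existing data; metadata rides along with the upload.
        blob_client.upload_blob(
            data,
            blob_type=BlobType.BlockBlob,
            overwrite=True,
            metadata={"project": "demo"},
            content_settings=ContentSettings(content_type="text/plain"),
            max_concurrency=2,
        )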
- """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return upload_page_blob(**options) - return upload_append_blob(**options) - - def _download_blob_options(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Service actually uses an end-range inclusive index - - validate_content = kwargs.pop('validate_content', False) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'clients': self._client, - 'config': self._config, - 'start_range': offset, - 'end_range': length, - 'version_id': kwargs.pop('version_id', None), - 'validate_content': validate_content, - 'encryption_options': { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function}, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': deserialize_blob_stream, - 'max_concurrency':kwargs.pop('max_concurrency', 1), - 'encoding': kwargs.pop('encoding', None), - 'timeout': kwargs.pop('timeout', None), - 'name': self.blob_name, - 'container': self.container_name} - options.update(kwargs) - return options - - @distributed_trace - def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 12 - :caption: Download a blob. 
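[Editor's note] A download sketch matching the docstring above: download_blob returns a StorageStreamDownloader, and offset/length select a byte range (placeholder client as before)::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="report.txt")

    # Whole blob into memory.
    content = blob_client.download_blob().readall()

    # First 512 bytes only, written straight into an open stream.
    with open("report_head.bin", "wb") as handle:
        blob_client.download_blob(offset=0, length=512).readinto(handle)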
- """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - return StorageStreamDownloader(**options) - - def _quick_query_options(self, query_expression, - **kwargs): - # type: (str, **Any) -> Dict[str, Any] - delimiter = '\n' - input_format = kwargs.pop('blob_format', None) - if input_format: - try: - delimiter = input_format.lineterminator - except AttributeError: - delimiter = input_format.delimiter - output_format = kwargs.pop('output_format', None) - if output_format: - try: - delimiter = output_format.lineterminator - except AttributeError: - delimiter = output_format.delimiter - else: - output_format = input_format - query_request = QueryRequest( - expression=query_expression, - input_serialization=serialize_query_format(input_format), - output_serialization=serialize_query_format(output_format) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo( - encryption_key=cpk.key_value, - encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm - ) - options = { - 'query_request': query_request, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'snapshot': self.snapshot, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized, - } - options.update(kwargs) - return options, delimiter - - @distributed_trace - def query_blob(self, query_expression, **kwargs): - # type: (str, **Any) -> BlobQueryReader - """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions. - This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - :keyword Callable[Exception] on_error: - A function to be called on any processing errors returned by the service. - :keyword blob_format: - Optional. Defines the serialization of the data currently stored in the blob. The default is to - treat the blob data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. - :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the blob. By providing an output format, the blob data will be reformatted - according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. - :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
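[Editor's note] A quick-query sketch for query_blob, assuming the blob holds CSV data with a header row; the dialect values and query text are illustrative, and the returned BlobQueryReader is drained with readall()::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient, DelimitedTextDialect

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="data.csv")

    input_format = DelimitedTextDialect(delimiter=",", quotechar='"',
                                        lineterminator="\n", has_header=True)
    reader = blob_client.query_blob(
        "SELECT * from BlobStorage",
        blob_format=input_format,
        on_error=lambda error: print("query error:", error),
    )
    print(reader.readall())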
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (BlobQueryReader) - :rtype: ~azure.storage.blob.BlobQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on blob/or blob snapshot data by providing simple query expressions. - """ - errors = kwargs.pop("on_error", None) - error_cls = kwargs.pop("error_cls", BlobQueryError) - encoding = kwargs.pop("encoding", None) - options, delimiter = self._quick_query_options(query_expression, **kwargs) - try: - headers, raw_response_body = self._client.blob.query(**options) - except StorageErrorException as error: - process_storage_error(error) - return BlobQueryReader( - name=self.blob_name, - container=self.container_name, - errors=errors, - record_delimiter=delimiter, - encoding=encoding, - headers=headers, - response=raw_response_body, - error_cls=error_cls) - - @staticmethod - def _generic_delete_blob_options(delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if delete_snapshots: - delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) - options = { - 'timeout': kwargs.pop('timeout', None), - 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs - 'delete_snapshots': delete_snapshots or None, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions} - options.update(kwargs) - return options - - def _delete_blob_options(self, delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - if self.snapshot and delete_snapshots: - raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") - options = self._generic_delete_blob_options(delete_snapshots, **kwargs) - options['snapshot'] = self.snapshot - options['version_id'] = kwargs.pop('version_id', None) - return options - - @distributed_trace - def delete_blob(self, delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. 
- Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 12 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - self._client.blob.delete(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def undelete_blob(self, **kwargs): - # type: (**Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_common.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 8 - :caption: Undeleting a blob. - """ - try: - self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_blob_properties(self, **kwargs): - # type: (**Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a blob. 
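[Editor's note] A sketch tying together the property and deletion calls above: fetch properties, soft-delete the blob along with its snapshots, then restore it while the delete retention window is still open (placeholder client)::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="report.txt")

    props = blob_client.get_blob_properties()
    print(props.name, props.size, props.last_modified)

    # "include" removes the blob and all of its snapshots in one call.
    blob_client.delete_blob(delete_snapshots="include")

    # Succeeds only while a delete retention policy is enabled and the retention period has not elapsed.
    blob_client.undelete_blob()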
- """ - # TODO: extract this out as _get_blob_properties_options - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - blob_props = self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - blob_props.name = self.blob_name - blob_props.container = self.container_name - return blob_props # type: ignore - - def _set_http_headers_options(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - options = { - 'timeout': kwargs.pop('timeout', None), - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return self._client.blob.set_http_headers(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _set_blob_metadata_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
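[Editor's note] A small sketch covering both setters introduced above: set_http_headers replaces all content settings at once, and set_blob_metadata replaces all metadata at once (placeholder client; values illustrative)::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient, ContentSettings

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="report.txt")

    # All content settings are overridden together, so restate any values you want to keep.
    blob_client.set_http_headers(content_settings=ContentSettings(
        content_type="text/plain",
        cache_control="max-age=3600",
    ))

    # Replaces the blob's entire metadata set with these pairs.
    blob_client.set_blob_metadata(metadata={"project": "demo", "owner": "team-a"})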
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return self._client.blob.set_metadata(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_page_blob_options( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - sequence_number = kwargs.pop('sequence_number', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - if premium_page_blob_tier: - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore - - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_content_length': size, - 'blob_sequence_number': sequence_number, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 
'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
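[Editor's note] A create_page_blob sketch reflecting the size constraint in the docstring above: the maximum size is declared up front and must be a multiple of 512 bytes (placeholder client)::

    from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

    page_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="disk.vhd")

    # 1 MiB page blob; the size must be aligned to a 512-byte page boundary.
    page_client.create_page_blob(
        size=1024 * 1024,
        sequence_number=0,
        metadata={"purpose": "demo"},
    )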
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return self._client.page_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
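A minimal usage sketch for the create_page_blob call documented above. The connection string and container/blob names are placeholders, and the upstream azure.storage.blob import path is assumed (the vendored module path in this package differs):

    from azure.storage.blob import BlobClient, ContentSettings

    # Placeholder connection string and names -- substitute real values.
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="mypageblob")

    # The size must be aligned to a 512-byte boundary; sequence_number is optional.
    props = blob.create_page_blob(
        size=1024,
        content_settings=ContentSettings(content_type="application/octet-stream"),
        metadata={"created_by": "example"},
        sequence_number=0)
    print(props["etag"], props["last_modified"])  # keys per the returns note above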
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.append_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_snapshot_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
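For comparison, a sketch of create_append_blob under the same assumptions (placeholder names, upstream azure.storage.blob import path):

    from azure.storage.blob import BlobClient, ContentSettings

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myappendblob")
    # Creates a zero-length append blob; data is appended in separate calls.
    blob.create_append_blob(
        content_settings=ContentSettings(content_type="text/plain"),
        metadata={"category": "logs"})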
- Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 8 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return self._client.blob.create_snapshot(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - if 'source_lease' in kwargs: - source_lease = kwargs.pop('source_lease') - try: - headers['x-ms-source-lease-id'] = source_lease.id # type: str - except AttributeError: - headers['x-ms-source-lease-id'] = source_lease - - tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) - - if kwargs.get('requires_sync'): - headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync')) - - timeout = kwargs.pop('timeout', None) - dest_mod_conditions = get_modify_conditions(kwargs) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'copy_source': source_url, - 'seal_blob': kwargs.pop('seal_destination_blob', None), - 'timeout': timeout, - 'modified_access_conditions': dest_mod_conditions, - 'blob_tags_string': blob_tags_string, - 'headers': headers, - 'cls': return_response_headers, - } - if not incremental_copy: - source_mod_conditions = get_source_conditions(kwargs) - dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) - options['source_modified_access_conditions'] = source_mod_conditions - options['lease_access_conditions'] = dest_access_conditions - options['tier'] = tier.value if tier else None - options.update(kwargs) - return options - - @distributed_trace - def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. 
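A short sketch of create_snapshot as documented above (placeholder names; upstream import path assumed):

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")
    snapshot_props = blob.create_snapshot(metadata={"reason": "backup"})
    # The returned dict carries the snapshot ID alongside etag and last_modified.
    print(snapshot_props["snapshot"])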
The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). 
- :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Copy a blob from a URL. - """ - options = self._start_copy_from_url_options( - source_url, - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return self._client.page_blob.copy_incremental(**options) - return self._client.blob.start_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _abort_copy_options(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - options = { - 'copy_id': copy_id, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID string, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - self._client.blob.abort_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
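A sketch of start_copy_from_url and abort_copy together, assuming placeholder account, container, and blob names and a source blob that is either public or carries a SAS token:

    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="destblob")
    source_url = "https://myaccount.blob.core.windows.net/mycontainer/sourceblob"

    copy_props = dest.start_copy_from_url(source_url, metadata={"origin": "sourceblob"})
    # copy_props holds etag, last_modified, copy_id and copy_status.
    if copy_props["copy_status"] == "pending":
        dest.abort_copy(copy_props["copy_id"])  # leaves a zero-length destination blob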
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace - def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
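A sketch of acquire_lease and of handing the returned BlobLeaseClient to a dependent call. Names are placeholders, and release() on the lease client is assumed from the wider SDK surface; it is not shown in this excerpt:

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")
    lease = blob.acquire_lease(lease_duration=15)  # 15-60 seconds, or -1 for infinite
    try:
        # Operations on a leased blob must present the lease.
        blob.create_snapshot(lease=lease)
    finally:
        lease.release()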
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - if self.snapshot and kwargs.get('version_id'): - raise ValueError("Snapshot and version_id cannot be set at the same time") - try: - self._client.blob.set_tier( - tier=standard_blob_tier, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def _stage_block_options( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_id = encode_base64(str(block_id)) - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'block_id': block_id, - 'content_length': length, - 'body': data, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. 
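A one-line sketch of set_standard_blob_tier under the same placeholder assumptions:

    from azure.storage.blob import BlobClient, StandardBlobTier

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblockblob")
    # Move a block blob to the Cool tier; rehydrate_priority matters only for Archive.
    blob.set_standard_blob_tier(StandardBlobTier.Cool)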
Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return self._client.block_blob.stage_block(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _stage_block_from_url_options( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if source_length is not None and source_offset is None: - raise ValueError("Source offset value must not be None if length is set.") - if source_length is not None: - source_length = source_offset + source_length - 1 - block_id = encode_base64(str(block_id)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - range_header = None - if source_offset is not None: - range_header, _ = validate_and_format_range_headers(source_offset, source_length) - - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'block_id': block_id, - 'content_length': 0, - 'source_url': source_url, - 'source_range': range_header, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param str source_url: The URL. 
- :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_from_url_options( - block_id, - source_url, - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return self._client.block_blob.stage_block_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _get_block_list_result(self, blocks): - # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] - committed = [] # type: List - uncommitted = [] # type: List - if blocks.committed_blocks: - committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access - if blocks.uncommitted_blocks: - uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access - return committed, uncommitted - - @distributed_trace - def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - def _commit_block_list_options( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - for block in block_list: - try: - if block.state.value == 'committed': - block_lookup.committed.append(encode_base64(str(block.id))) - elif block.state.value == 'uncommitted': - block_lookup.uncommitted.append(encode_base64(str(block.id))) - else: - block_lookup.latest.append(encode_base64(str(block.id))) - except AttributeError: - block_lookup.latest.append(encode_base64(str(block))) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - blob_headers = None - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'blocks': block_lookup, - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'tier': tier.value if tier else None, - 'blob_tags_string': blob_tags_string, - 'headers': headers - } - options.update(kwargs) - return options - - @distributed_trace - def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) 
-> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of BlobBlock objects. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name.
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.block_blob.commit_block_list(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def _set_blob_tags_options(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - tags = serialize_blob_tags(tags) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'tags': tags, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
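A sketch of the block-blob flow built from the calls documented above: stage blocks, commit them, then inspect the block list (placeholder names, upstream import path assumed):

    from azure.storage.blob import BlobClient, BlobBlock

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblockblob")
    # Stage two blocks; block IDs must all be the same length for a given blob.
    blob.stage_block(block_id="block-000", data=b"hello ")
    blob.stage_block(block_id="block-001", data=b"world")
    # Committing writes the blob from the listed blocks, in order.
    blob.commit_block_list([BlobBlock(block_id="block-000"),
                            BlobBlock(block_id="block-001")])
    committed, uncommitted = blob.get_block_list("all")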
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return self._client.blob.set_tags(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _get_blob_tags_options(self, **kwargs): - # type: (**Any) -> Dict[str, str] - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'version_id': kwargs.pop('version_id', None), - 'snapshot': self.snapshot, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized} - return options - - @distributed_trace - def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except StorageErrorException as error: - process_storage_error(error) - - def _get_page_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Reformat to an inclusive range index - page_range, _ = validate_and_format_range_headers( - offset, length, start_range_required=False, end_range_required=False, align_to_page=True - ) - options = { - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': page_range} - if previous_snapshot_diff: - try: - options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore - except AttributeError: - try: - options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore - except TypeError: - options['prevsnapshot'] = previous_snapshot_diff - options.update(kwargs) - return options - - @distributed_trace - def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
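A sketch of set_blob_tags and get_blob_tags (API version 2019-12-12 or later; placeholder names):

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")
    # Each call replaces the full tag set; a blob holds at most 10 tags.
    blob.set_blob_tags({"project": "alpha", "stage": "raw"})
    tags = blob.get_blob_tags()  # e.g. {"project": "alpha", "stage": "raw"}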
- :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - e.g. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element is the list of filled page ranges, the second element is the list of cleared page ranges. - :rtype: tuple(list(dict(str, str)), list(dict(str, str))) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = self._client.page_blob.get_page_ranges(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace - def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param str previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag.
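A sketch of get_page_ranges for a page blob under the same placeholder assumptions; offsets and lengths must be 512-byte aligned:

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="mypageblob")
    filled, cleared = blob.get_page_ranges(offset=0, length=4096)
    for page_range in filled:
        print(page_range["start"], page_range["end"])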
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if sequence_number_action is None: - raise ValueError("A sequence number action must be specified") - options = { - 'sequence_number_action': sequence_number_action, - 'timeout': kwargs.pop('timeout', None), - 'blob_sequence_number': sequence_number, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return self._client.page_blob.update_sequence_number(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _resize_blob_options(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if size is None: - raise ValueError("A content length must be specified for a Page Blob.") - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'blob_content_length': size, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def resize_blob(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
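A short sketch of set_sequence_number, reusing the `blob` client from the previous sketch; the action can be passed as a plain string matching SequenceNumberAction ('update', 'max' or 'increment'):

.. code-block:: python

    # Set the sequence number explicitly; 'max' and 'increment' are the other actions.
    props = blob.set_sequence_number("update", sequence_number="7")
    print(props["etag"], props["last_modified"])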
- :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return self._client.page_blob.resize(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_page_options( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - if isinstance(page, six.text_type): - page = page.encode(kwargs.pop('encoding', 'UTF-8')) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': page[:length], - 'content_length': length, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. 
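resize_blob in the same vein; the 1 GiB target below is an arbitrary placeholder, chosen only because it is a multiple of 512 bytes:

.. code-block:: python

    # Grow (or shrink) the page blob; pages beyond the new size are cleared.
    props = blob.resize_blob(1024 * 1024 * 1024)
    print(props["etag"])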
This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return self._client.page_blob.upload_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_pages_from_url_options( # type: ignore - self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # TODO: extract the code to a method format_range - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - if source_offset is None or offset % 512 != 0: - raise ValueError("source_offset must be an integer that aligns with 512 page size") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) # should subtract 1 here? - - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - source_content_md5 = kwargs.pop('source_content_md5', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). 
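upload_page needs a 512-byte-aligned offset and length; a small sketch with placeholder data:

.. code-block:: python

    data = b"\x00" * 4096  # payload length must be a multiple of 512
    blob.upload_page(data, offset=0, length=len(data))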
- :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_pages_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _clear_page_options(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def clear_page(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. 
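upload_pages_from_url, sketched with placeholder URLs; the source must be publicly readable or carry a SAS, and all three byte values must be 512-aligned:

.. code-block:: python

    blob.upload_pages_from_url(
        source_url="https://srcaccount.blob.core.windows.net/src/mydisk.vhd?<sas>",
        offset=0,          # destination offset
        length=4096,       # bytes to copy
        source_offset=0,   # offset into the source
    )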
- :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return self._client.page_blob.clear_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _append_block_options( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if length == 0: - return {} - if isinstance(data, bytes): - data = data[:length] - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - validate_content = kwargs.pop('validate_content', False) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': data, - 'content_length': length, - 'timeout': kwargs.pop('timeout', None), - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
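clear_page simply zeroes an aligned range:

.. code-block:: python

    # Clear the first page (offsets and lengths are 512-byte aligned).
    blob.clear_page(offset=0, length=512)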
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return self._client.append_blob.append_block(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _append_block_from_url_options( # type: ignore - self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # If end range is provided, start range must be provided - if source_length is not None and source_offset is None: - raise ValueError("source_offset should also be specified if source_length is specified") - # Format based on whether length is present - source_range = None - if source_length is not None: - end_range = source_offset + source_length - 1 - source_range = 'bytes={0}-{1}'.format(source_offset, end_range) - elif source_offset is not None: - source_range = "bytes={0}-".format(source_offset) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - source_content_md5 = kwargs.pop('source_content_md5', None) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': copy_source_url, - 'content_length': 0, - 'source_range': source_range, - 'source_content_md5': source_content_md5, - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). 
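For the append-blob operations, a sketch assuming `append_blob` is a BlobClient pointing at an existing append blob (creation happens elsewhere in the client and is not shown in this hunk):

.. code-block:: python

    # Append two blocks; the second caps the blob at 4 MiB via maxsize_condition.
    append_blob.append_block(b"log line 1\n")
    append_blob.append_block("log line 2\n", encoding="utf-8",
                             maxsize_condition=4 * 1024 * 1024)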
- :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._append_block_from_url_options( - copy_source_url, - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return self._client.append_blob.append_block_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _seal_append_blob_options(self, **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - append_conditions = None - if appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
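append_block_from_url and seal_append_blob, continuing the same sketch; the source URL and SAS are placeholders, and sealing needs API version 2019-12-12 or later:

.. code-block:: python

    append_blob.append_block_from_url(
        copy_source_url="https://srcaccount.blob.core.windows.net/src/part.log?<sas>",
        source_offset=0,
        source_length=1024,
    )
    append_blob.seal_append_blob()  # the append blob becomes read-only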
- :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return self._client.append_blob.seal(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2019_12_12/_blob_service_client.py deleted file mode 100644 index bde8475..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_blob_service_client.py +++ /dev/null @@ -1,677 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._shared.models import LocationMode -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.parser import _to_utc_datetime -from ._shared.response_handlers import return_response_headers, process_storage_error, \ - parse_to_internal_user_delegation_key -from ._generated import AzureBlobStorage, VERSION -from ._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo -from ._container_client import ContainerClient -from ._blob_client import BlobClient -from ._models import ContainerPropertiesPaged, FilteredBlobPaged -from ._serialize import get_api_version -from ._deserialize import service_stats_deserialize, service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.pipeline.transport import HttpTransport - from azure.core.pipeline.policies import HTTPPolicy - from ._shared.models import UserDelegationKey - from ._lease import BlobLeaseClient - from ._models import ( - ContainerProperties, - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(StorageAccountHostsMixin): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. 
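A sketch of constructing the service client with a pinned API version; the URL and credential are placeholders, and the import again uses the upstream azure.storage.blob package that these vendored modules track:

.. code-block:: python

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient(
        account_url="https://myaccount.blob.core.windows.net",
        credential="<sas-or-account-key>",
        api_version="2019-07-07",  # pin an older service version if required
    )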
- Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self._query_str, credential = self._format_query_string(sas_token, credential) - super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobServiceClient - """Create BlobServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. 
The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob service client. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string] - :end-before: [END auth_from_connection_string] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 8 - :caption: Getting account information for the blob service. - """ - try: - return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_service_stats(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. 
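get_user_delegation_key requires a token credential rather than a key or SAS; the sketch below assumes the separate azure.identity package is installed:

.. code-block:: python

    from datetime import datetime, timedelta

    from azure.identity import DefaultAzureCredential
    from azure.storage.blob import BlobServiceClient

    token_service = BlobServiceClient(
        "https://myaccount.blob.core.windows.net",
        credential=DefaultAzureCredential(),
    )
    delegation_key = token_service.get_user_delegation_key(
        datetime.utcnow(), datetime.utcnow() + timedelta(hours=1))

    info = token_service.get_account_information()
    print(info["sku_name"], info["account_kind"])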
- The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 8 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 8 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. 
If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 8 - :caption: Setting service properties for the blob service. - """ - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 12 - :caption: Listing the containers in the blob service. 
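set_service_properties only changes the elements that are passed; a sketch enabling blob soft delete, assuming RetentionPolicy is re-exported at package level as in the upstream SDK:

.. code-block:: python

    from azure.storage.blob import RetentionPolicy

    # Keep deleted blobs for 7 days; unspecified settings are left untouched.
    service.set_service_properties(
        delete_retention_policy=RetentionPolicy(enabled=True, days=7))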
- """ - include = ['metadata'] if include_metadata else [] - - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> ItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace - def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 12 - :caption: Creating a container in the blob service. 
- """ - container = self.get_container_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace - def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 12 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace - def _undelete_container(self, deleted_container_name, deleted_container_version, new_name=None, **kwargs): - # type: (str, str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :param str new_name: - The new name for the deleted container to be restored to. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - container = self.get_container_client(new_name or deleted_container_name) - try: - container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except StorageErrorException as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 8 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 12 - :caption: Getting the blob client to interact with a specific blob. 
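A minimal sketch of the two accessor methods documented above, `get_container_client` and `get_blob_client`; the names are placeholders, and neither call contacts the service until an operation is invoked on the returned client:

    # Both accessors only construct clients; the container and blob need not exist yet.
    container_client = service.get_container_client("samples")
    blob_client = service.get_blob_client("samples", "folder/report.csv", snapshot=None)
    print(container_client.container_name, blob_client.url)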
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_container_client.py b/azure/multiapi/storagev2/blob/v2019_12_12/_container_client.py deleted file mode 100644 index d771d19..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_container_client.py +++ /dev/null @@ -1,1441 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core import MatchConditions -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import HttpRequest - -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from ._generated import AzureBlobStorage, VERSION -from ._generated.models import ( - StorageErrorException, - SignedIdentifier) -from ._deserialize import deserialize_container_properties -from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version -from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobType) -from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged -from ._lease import BlobLeaseClient, get_access_conditions -from ._blob_client import BlobClient - -if TYPE_CHECKING: - from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports - from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports - from datetime import datetime - from ._models import ( # pylint: disable=unused-import - PublicAccess, - AccessPolicy, - ContentSettings, - StandardBlobTier, - PremiumPageBlobTier) - - -def _get_blob_name(blob): - """Return the blob name. 
- - :param blob: A blob string or BlobProperties - :rtype: str - """ - try: - return blob.get('name') - except AttributeError: - return blob - - -class ContainerClient(StorageAccountHostsMixin): - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 8 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not container_name: - raise ValueError("Please specify a container name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self.container_name = container_name - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - self._query_str) - - @classmethod - def from_container_url(cls, container_url, credential=None, **kwargs): - # type: (str, Optional[Any], Any) -> ContainerClient - """Create ContainerClient from a container url. - - :param str container_url: - The full endpoint URL to the Container, including SAS token if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type container_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - """ - try: - if not container_url.lower().startswith('http'): - container_url = "https://" + container_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(container_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(container_url)) - - container_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(container_path) > 1: - account_path = "/" + "/".join(container_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name = unquote(container_path[-1]) - if not container_name: - raise ValueError("Invalid URL. Please provide a URL with a valid container name") - return cls(account_url, container_name=container_name, credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ContainerClient - """Create ContainerClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: - The container name for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. 
The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_container] - :end-before: [END auth_from_connection_string_container] - :language: python - :dedent: 8 - :caption: Creating the ContainerClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, credential=credential, **kwargs) - - @distributed_trace - def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 12 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - timeout = kwargs.pop('timeout', None) - headers.update(add_metadata_headers(metadata)) # type: ignore - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 12 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_container_properties(self, **kwargs): - # type: (Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace - def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the container. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 12 - :caption: Getting the access policy on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. 
The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 12 - :caption: Setting access policy on the container. - """ - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - lease = kwargs.pop('lease', None) - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. 
- Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 8 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. - :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace - def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. 
This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. 
This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 8 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace - def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. 
- - "include": Deletes the blob along with all snapshots. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - blob_client.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace - def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return blob_client.download_blob(offset=offset, length=length, **kwargs) - - def _generate_delete_blobs_subrequest_options( - self, snapshot=None, - delete_snapshots=None, - lease_access_conditions=None, - modified_access_conditions=None, - **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. 
- """ - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct parameters - timeout = kwargs.pop('timeout', None) - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access - "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access - "lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_delete_blobs_options(self, - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - delete_snapshots = kwargs.pop('delete_snapshots', None) - if_modified_since = kwargs.pop('if_modified_since', None) - if_unmodified_since = kwargs.pop('if_unmodified_since', None) - if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "" - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - snapshot=blob.get('snapshot'), - delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), - lease=blob.get('lease_id'), - 
if_modified_since=if_modified_since or blob.get('if_modified_since'), - if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), - etag=blob.get('etag'), - if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), - match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') - else None, - timeout=blob.get('timeout'), - ) - except AttributeError: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - delete_snapshots=delete_snapshots, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_tags_match_condition=if_tags_match_condition - ) - - query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) - - req = HttpRequest( - "DELETE", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def delete_blobs(self, *blobs, **kwargs): - # type: (...) -> Iterator[HttpResponse] - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for the specified number of days. - After the specified number of days, blobs' data is removed from the service during garbage collection. - Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]`. - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()`. - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapshots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blob's snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - e.g. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 8 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def _generate_set_tiers_subrequest_options( - self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. - """ - if not tier: - raise ValueError("A blob tier must be specified") - if snapshot and version_id: - raise ValueError("Snapshot and version_id cannot be set at the same time") - if_tags = kwargs.pop('if_tags', None) - - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - timeout = kwargs.pop('timeout', None) - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if version_id is not None: - query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access - "rehydrate_priority", rehydrate_priority, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_set_tiers_options(self, - blob_tier, # type: Optional[Union[str, StandardBlobTier, PremiumPageBlobTier]] 
- *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - rehydrate_priority = kwargs.pop('rehydrate_priority', None) - if_tags = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "" - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - tier = blob_tier or blob.get('blob_tier') - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - tier=tier, - snapshot=blob.get('snapshot'), - version_id=blob.get('version_id'), - rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), - lease_access_conditions=blob.get('lease_id'), - if_tags=if_tags or blob.get('if_tags_match_condition'), - timeout=timeout or blob.get('timeout') - ) - except AttributeError: - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) - - req = HttpRequest( - "PUT", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def set_standard_blob_tier_blobs( - self, - standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - snapshot: - key: "snapshot", value type: str - version id: - key: "version_id", value type: str - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - e.g. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - @distributed_trace - def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. 
- :return: An iterator of responses, one for each blob in order - :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[str, BlobProperties] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 8 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_deserialize.py b/azure/multiapi/storagev2/blob/v2019_12_12/_deserialize.py deleted file mode 100644 index a8b48b7..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_deserialize.py +++ /dev/null @@ -1,157 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
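For orientation, the batch helpers removed above (delete_blobs, set_standard_blob_tier_blobs) are normally driven through a ContainerClient. The sketch below is illustrative only and not part of this diff: the connection string, container, and blob names are placeholders, and it assumes this vendored version exposes the same public ContainerClient surface as upstream azure-storage-blob.

from azure.multiapi.storagev2.blob.v2019_12_12 import ContainerClient

# Placeholder connection string and names; a real storage account is required.
container = ContainerClient.from_connection_string(
    "<connection-string>", container_name="mycontainer")

# Batch-delete several blobs in one request; dict entries carry per-blob options,
# using the keys documented in the delete_blobs docstring above.
container.delete_blobs(
    "blob1",
    {"name": "blob2", "delete_snapshots": "include"},
)

# Archive two block blobs with a single batch call (tier passed as a string).
container.set_standard_blob_tier_blobs("Archive", "blob1", "blob2")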
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties -from ._shared.models import get_enum_value - -from ._shared.response_handlers import deserialize_metadata -from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ - StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule - -if TYPE_CHECKING: - from ._generated.models import PageList - - -def deserialize_blob_properties(response, obj, headers): - blob_properties = BlobProperties( - metadata=deserialize_metadata(response, obj, headers), - object_replication_source_properties=deserialize_ors_policies(response.headers), - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - blob_properties.content_settings.content_md5 = None - return blob_properties - - -def deserialize_ors_policies(policy_dictionary): - - if policy_dictionary is None: - return None - # For source blobs (blobs that have policy ids and rule ids applied to them), - # the header will be formatted as "x-ms-or-_: {Complete, Failed}". - # The value of this header is the status of the replication. - or_policy_status_headers = {key: val for key, val in policy_dictionary.items() - if 'or-' in key and key != 'x-ms-or-policy-id'} - - parsed_result = {} - - for key, val in or_policy_status_headers.items(): - # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule - policy_and_rule_ids = key.split('or-')[1].split('_') - policy_id = policy_and_rule_ids[0] - rule_id = policy_and_rule_ids[1] - - # If we are seeing this policy for the first time, create a new list to store rule_id -> result - parsed_result[policy_id] = parsed_result.get(policy_id) or list() - parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val)) - - result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()] - - return result_list - - -def deserialize_blob_stream(response, obj, headers): - blob_properties = deserialize_blob_properties(response, obj, headers) - obj.properties = blob_properties - return response.location_mode, obj - - -def deserialize_container_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - container_properties = ContainerProperties( - metadata=metadata, - **headers - ) - return container_properties - - -def get_page_ranges_result(ranges): - # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - page_range = [] # type: ignore - clear_range = [] # type: List - if ranges.page_range: - page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore - if ranges.clear_range: - clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range] - return page_range, clear_range # type: ignore - - -def service_stats_deserialize(generated): - """Deserialize a ServiceStats objects into a dict. - """ - return { - 'geo_replication': { - 'status': generated.geo_replication.status, - 'last_sync_time': generated.geo_replication.last_sync_time, - } - } - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. 
- """ - return { - 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'target_version': generated.default_service_version, # pylint: disable=protected-access - 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access - 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access - } - - -def get_blob_properties_from_generated_code(generated): - blob = BlobProperties() - blob.name = generated.name - blob_type = get_enum_value(generated.properties.blob_type) - blob.blob_type = BlobType(blob_type) if blob_type else None - blob.etag = generated.properties.etag - blob.deleted = generated.deleted - blob.snapshot = generated.snapshot - blob.is_append_blob_sealed = generated.properties.is_sealed - blob.metadata = generated.metadata.additional_properties if generated.metadata else {} - blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None - blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access - blob.last_modified = generated.properties.last_modified - blob.creation_time = generated.properties.creation_time - blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access - blob.size = generated.properties.content_length - blob.page_blob_sequence_number = generated.properties.blob_sequence_number - blob.server_encrypted = generated.properties.server_encrypted - blob.encryption_scope = generated.properties.encryption_scope - blob.deleted_time = generated.properties.deleted_time - blob.remaining_retention_days = generated.properties.remaining_retention_days - blob.blob_tier = generated.properties.access_tier - blob.rehydrate_priority = generated.properties.rehydrate_priority - blob.blob_tier_inferred = generated.properties.access_tier_inferred - blob.archive_status = generated.properties.archive_status - blob.blob_tier_change_time = generated.properties.access_tier_change_time - blob.version_id = generated.version_id - blob.is_current_version = generated.is_current_version - blob.tag_count = generated.properties.tag_count - blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access - blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) - return blob - - -def parse_tags(generated_tags): - # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] - """Deserialize a list of BlobTag objects into a dict. - """ - if generated_tags: - tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} - return tag_dict - return None diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_download.py b/azure/multiapi/storagev2/blob/v2019_12_12/_download.py deleted file mode 100644 index e11023c..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_download.py +++ /dev/null @@ -1,579 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range -from ._deserialize import get_page_ranges_result - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - non_empty_ranges=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - self.non_empty_ranges = non_empty_ranges - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - 
return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _do_optimize(self, given_range_start, given_range_end): - # If we have no page range list stored, then assume there's data everywhere for that page blob - # or it's a block blob or append blob - if self.non_empty_ranges is None: - return False - - for source_range in self.non_empty_ranges: - # Case 1: As the range list is sorted, if we've reached such a source_range - # we've checked all the appropriate source_range already and haven't found any overlapping. - # so the given range doesn't have any data and download optimization could be applied. - # given range: | | - # source range: | | - if given_range_end < source_range['start']: # pylint:disable=no-else-return - return True - # Case 2: the given range comes after source_range, continue checking. - # given range: | | - # source range: | | - elif source_range['end'] < given_range_start: - pass - # Case 3: source_range and given range overlap somehow, no need to optimize. - else: - return False - # Went through all src_ranges, but nothing overlapped. Optimization will be applied. - return True - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get("modified_access_conditions"): - self.request_options["modified_access_conditions"].if_match = response.properties.etag - - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - chunk = next(self._iter_chunks) - self._current_content = self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - next = __next__ # Python 2 compatibility. - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the blob. - """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. 
- # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. 
- try: - _, response = self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - # according to the REST API documentation: - # in a highly fragmented page blob with a large number of writes, - # a Get Page Ranges request can fail due to an internal server timeout. - # thus, if the page blob is not sparse, it's ok for it to fail - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overridden by the user by specifying '*' - if self._request_options.get("modified_access_conditions"): - if not self._request_options["modified_access_conditions"].if_match: - self._request_options["modified_access_conditions"].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Text encoding to decode the downloaded bytes. Default is UTF-8. 
- :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/__init__.py deleted file mode 100644 index f5c8f4a..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
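For context, the StorageStreamDownloader removed above is normally obtained from BlobClient.download_blob(). The sketch below is illustrative only and not part of this diff: the connection string and blob names are placeholders, and it assumes this vendored version exposes the same public BlobClient surface as upstream azure-storage-blob.

from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient

# Placeholder connection string and names; a real storage account is required.
blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="report.csv")

# Read the whole blob into memory in one call...
data = blob.download_blob().readall()

# ...or stream it to a file handle; with max_concurrency > 1 the target stream
# must be seekable, as enforced by readinto() above.
with open("report.csv", "wb") as handle:
    blob.download_blob(max_concurrency=4).readinto(handle)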
-# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] - -from .version import VERSION - -__version__ = VERSION - diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/_azure_blob_storage.py deleted file mode 100644 index aa27842..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/_azure_blob_storage.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import PipelineClient -from msrest import Serializer, Deserializer - -from ._configuration import AzureBlobStorageConfiguration -from azure.core.exceptions import map_error -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from . import models - - -class AzureBlobStorage(object): - """AzureBlobStorage - - - :ivar service: Service operations - :vartype service: azure.storage.blob.operations.ServiceOperations - :ivar container: Container operations - :vartype container: azure.storage.blob.operations.ContainerOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.blob.operations.DirectoryOperations - :ivar blob: Blob operations - :vartype blob: azure.storage.blob.operations.BlobOperations - :ivar page_blob: PageBlob operations - :vartype page_blob: azure.storage.blob.operations.PageBlobOperations - :ivar append_blob: AppendBlob operations - :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations - :ivar block_blob: BlockBlob operations - :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. 
- :type url: str - """ - - def __init__(self, url, **kwargs): - - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-12-12' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - self._client.close() - def __enter__(self): - self._client.__enter__() - return self - def __exit__(self, *exc_details): - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/_configuration.py deleted file mode 100644 index 5bf5671..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/_configuration.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from .version import VERSION - - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :ivar version: Specifies the version of the operation to use for this - request. 
- :type version: str - """ - - def __init__(self, url, **kwargs): - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION)) - self.generate_client_request_id = True - - self.url = url - self.version = "2019-12-12" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/__init__.py deleted file mode 100644 index 009c965..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage_async import AzureBlobStorage -__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/_azure_blob_storage_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/_azure_blob_storage_async.py deleted file mode 100644 index 7b1aa34..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/_azure_blob_storage_async.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import AsyncPipelineClient -from msrest import Serializer, Deserializer - -from ._configuration_async import AzureBlobStorageConfiguration -from azure.core.exceptions import map_error -from .operations_async import ServiceOperations -from .operations_async import ContainerOperations -from .operations_async import DirectoryOperations -from .operations_async import BlobOperations -from .operations_async import PageBlobOperations -from .operations_async import AppendBlobOperations -from .operations_async import BlockBlobOperations -from .. 
import models - - -class AzureBlobStorage(object): - """AzureBlobStorage - - - :ivar service: Service operations - :vartype service: azure.storage.blob.aio.operations_async.ServiceOperations - :ivar container: Container operations - :vartype container: azure.storage.blob.aio.operations_async.ContainerOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.blob.aio.operations_async.DirectoryOperations - :ivar blob: Blob operations - :vartype blob: azure.storage.blob.aio.operations_async.BlobOperations - :ivar page_blob: PageBlob operations - :vartype page_blob: azure.storage.blob.aio.operations_async.PageBlobOperations - :ivar append_blob: AppendBlob operations - :vartype append_blob: azure.storage.blob.aio.operations_async.AppendBlobOperations - :ivar block_blob: BlockBlob operations - :vartype block_blob: azure.storage.blob.aio.operations_async.BlockBlobOperations - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - """ - - def __init__( - self, url, **kwargs): - - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-12-12' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self): - await self._client.close() - async def __aenter__(self): - await self._client.__aenter__() - return self - async def __aexit__(self, *exc_details): - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/__init__.py deleted file mode 100644 index dec0519..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from ._service_operations_async import ServiceOperations -from ._container_operations_async import ContainerOperations -from ._directory_operations_async import DirectoryOperations -from ._blob_operations_async import BlobOperations -from ._page_blob_operations_async import PageBlobOperations -from ._append_blob_operations_async import AppendBlobOperations -from ._block_blob_operations_async import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_append_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_append_blob_operations_async.py deleted file mode 100644 index ea79827..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_append_blob_operations_async.py +++ /dev/null @@ -1,694 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class AppendBlobOperations: - """AppendBlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "AppendBlob" - - async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. 
See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, 
**path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - async def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append - Block is supported only on version 2015-02-21 version or later. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "appendblock" - - # Construct URL - url = self.append_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 
'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block.metadata = {'url': '/{containerName}/{blob}'} - - async def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob where the contents are read from a source url. The - Append Block operation is permitted only if the blob was created with - x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "appendblock" - - # Construct URL - url = self.append_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = 
self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if 
source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def seal(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, append_position_access_conditions=None, *, cls=None, **kwargs): - """The Seal operation seals the Append Blob to make it read-only. Seal is - supported only on version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - - comp = "seal" - - # Construct URL - url = self.seal.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - seal.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_blob_operations_async.py deleted file mode 100644 index 24aea41..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_blob_operations_async.py +++ /dev/null @@ -1,3064 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class BlobOperations: - """BlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_requires_sync: . Constant value: "true". - :ivar x_ms_copy_action: . Constant value: "abort". - :ivar restype: . Constant value: "account". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_requires_sync = "true" - self.x_ms_copy_action = "abort" - self.restype = "account" - - async def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Download operation reads or downloads a blob from the system, - including its metadata and properties. You can also call Download to - read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. 
It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param range_get_content_md5: When set to true and specified together - with the Range, the service returns the MD5 hash for the range, as - long as the range is less than or equal to 4 MB in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified - together with the Range, the service returns the CRC64 hash for the - range, as long as the range is less than or equal to 4 MB in size. - :type range_get_content_crc64: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - 
header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': 
self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', 
response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} - - async def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Properties operation returns all user-defined metadata, - standard HTTP properties, and system properties for the blob. It does - not return the content of the blob. 
- - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - 
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')), - 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', 
response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')), - 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), - 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), - 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}/{blob}'} - - async def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """If the storage account's soft delete feature is disabled then, when a - blob is deleted, it is permanently removed from the storage account. If - the storage account's soft delete feature is enabled, then, when a blob - is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for - the number of days specified by the DeleteRetentionPolicy section of - [Storage service properties] (Set-Blob-Service-Properties.md). After - the specified number of days has passed, the blob's data is permanently - removed from the storage account. 
Note that you continue to be charged - for the soft-deleted blob's storage until it is permanently removed. - Use the List Blobs API and specify the "include=deleted" query - parameter to discover which blobs and snapshots have been soft deleted. - You can then use the Undelete Blob API to restore a soft-deleted blob. - All other operations on a soft-deleted blob or snapshot causes the - service to return an HTTP status code of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param delete_snapshots: Required if the blob has associated - snapshots. Specify one of the following two options: include: Delete - the base blob and all of its snapshots. only: Delete only the blob's - snapshots and not the blob itself. Possible values include: 'include', - 'only' - :type delete_snapshots: str or - ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}/{blob}'} - - async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. 
Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
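# Illustrative sketch (not part of the deleted file): set_access_control and
# get_access_control above issue PATCH ?action=setAccessControl and
# HEAD ?action=getAccessControl with the x-ms-owner / x-ms-group /
# x-ms-permissions / x-ms-acl headers. Assuming the public
# azure-storage-file-datalake package (account URL, credential and paths are
# placeholders):
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient("https://<account>.dfs.core.windows.net", credential="<account-key>")
file_client = service.get_file_system_client("myfs").get_file_client("data/sample.txt")

# Symbolic (rwxr-x---) or 4-digit octal permissions are accepted, as documented above.
file_client.set_access_control(owner="<owner-object-id>", permissions="rwxr-x---")

# upn=True returns User Principal Names instead of Azure AD object IDs.
acl = file_client.get_access_control(upn=True)
print(acl["owner"], acl["permissions"], acl["acl"])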
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': 
self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Rename a blob/file. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. 
If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - async def undelete(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.undelete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - undelete.metadata = {'url': '/{containerName}/{blob}'} - - async def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, *, cls=None, 
**kwargs): - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', - 'RelativeToNow', 'Absolute' - :type expiry_options: str or - ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param expires_on: The time to set the blob to expiry - :type expires_on: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "expiry" - - # Construct URL - url = self.set_expiry.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_expiry.metadata = {'url': '/{containerName}/{blob}'} - - async def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. 
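# Illustrative sketch (not part of the deleted file): set_expiry above sends
# PUT <blob-url>?comp=expiry with x-ms-expiry-option and, for every option other
# than NeverExpire, x-ms-expiry-time. A raw equivalent with the requests
# library (the URL, SAS token and service version are placeholders):
import requests

blob_url = "https://<account>.blob.core.windows.net/mycontainer/temp.log?<sas-token>"
response = requests.put(
    blob_url + "&comp=expiry",
    headers={
        "x-ms-version": "2021-08-06",
        "x-ms-expiry-option": "RelativeToNow",  # NeverExpire / RelativeToCreation / Absolute
        "x-ms-expiry-time": "86400000",         # milliseconds for the relative options
    },
)
response.raise_for_status()  # the generated operation treats anything but 200 as an error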
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - 
header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} - - async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set Blob Metadata operation sets user-defined metadata for the - specified blob as one or more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. 
If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}/{blob}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
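# Illustrative sketch (not part of the deleted file): set_http_headers
# (comp=properties) and set_metadata (comp=metadata) above are the low-level
# calls behind the public BlobClient property helpers. Assuming the public
# azure-storage-blob package (connection string and names are placeholders):
from azure.storage.blob import BlobClient, ContentSettings

blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "report.csv")

# Maps to the x-ms-blob-content-type / x-ms-blob-cache-control headers above.
blob.set_http_headers(content_settings=ContentSettings(content_type="text/csv",
                                                        cache_control="max-age=3600"))

# Each pair becomes an x-ms-meta-<name> header on the wire.
blob.set_blob_metadata({"department": "finance", "owner": "team-a"})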
- :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", 
if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
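# Illustrative sketch (not part of the deleted file): acquire_lease above and the
# release/renew/change operations that follow all send comp=lease with an
# x-ms-lease-action header. Assuming the public azure-storage-blob package,
# which exposes them through a lease object (placeholders as before):
import uuid

from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "report.csv")

# -1 requests an infinite lease; finite durations must be between 15 and 60 seconds.
lease = blob.acquire_lease(lease_duration=30)

lease.renew()                                       # x-ms-lease-action: renew
lease.change(proposed_lease_id=str(uuid.uuid4()))   # x-ms-lease-action: change
lease.release()                                     # x-ms-lease-action: release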
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': 
self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - 
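A minimal usage sketch for the lease operations deleted above (renew_lease, change_lease, release_lease, break_lease), assuming blob_ops is an instance of this API version's generated async blob-operations class; the function and variable names are illustrative only::

    import uuid

    async def rotate_and_release_lease(blob_ops, lease_id):
        # Keep the current lease alive (x-ms-lease-action: renew).
        await blob_ops.renew_lease(lease_id)
        # Swap the lease over to a new client-chosen GUID (action: change).
        new_id = str(uuid.uuid4())
        await blob_ops.change_lease(lease_id, proposed_lease_id=new_id)
        # Drop the lock once writes are finished (action: release).
        await blob_ops.release_lease(new_id)

    async def force_unlock(blob_ops):
        # Break a lease without knowing its ID; the blob becomes
        # leasable again after the optional break period elapses.
        await blob_ops.break_lease(break_period=10)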
if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 
- 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} - - async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Start Copy From URL operation copies a blob or an internet resource - to a new blob. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. - Service version 2019-12-12 and newer. 
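A sketch of calling the create_snapshot operation deleted above, using the cls callback (which receives the raw response, the deserialized body and the response-header dict) to surface the x-ms-snapshot header; blob_ops is the same assumed generated async operations instance::

    async def snapshot_blob(blob_ops):
        def pick_snapshot(response, deserialized, headers):
            # For this operation the body is None; the snapshot id
            # comes back in the x-ms-snapshot response header.
            return headers.get('x-ms-snapshot')

        return await blob_ops.create_snapshot(
            metadata="project=demo",  # serialized into the x-ms-meta header
            cls=pick_snapshot,
        )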
- :type seal_blob: bool - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - source_if_tags = None - if source_modified_access_conditions is not None: - source_if_tags = source_modified_access_conditions.source_if_tags - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if 
blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, 
modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Copy From URL operation copies a blob or an internet resource to a - new blob. It will not return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - 
header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Abort Copy From URL operation aborts a pending Copy From URL - operation, and leaves a destination blob with zero length and full - metadata. 
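A sketch combining the start_copy_from_url and abort_copy_from_url operations above: start the server-side copy, read x-ms-copy-id and x-ms-copy-status from the response headers via cls, and cancel the copy if it is still pending (blob_ops as before; the status value is assumed to compare equal to the plain string 'pending', as the generated str-based enums normally do)::

    async def start_then_abort(blob_ops, source_url):
        def pick_headers(response, deserialized, headers):
            return headers

        headers = await blob_ops.start_copy_from_url(source_url, cls=pick_headers)
        copy_id = headers.get('x-ms-copy-id')
        if copy_id and headers.get('x-ms-copy-status') == 'pending':
            # Aborting leaves the destination blob zero-length
            # with full metadata.
            await blob_ops.abort_copy_from_url(copy_id)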
- - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set Tier operation sets the tier on a blob. 
The operation is - allowed on a page blob in a premium storage account and on a block blob - in a blob storage account (locally redundant storage only). A premium - page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. Possible values - include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', - 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tier" - - # Construct URL - url = self.set_tier.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = 
self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tier.metadata = {'url': '/{containerName}/{blob}'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . - - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - 
get_account_info.metadata = {'url': '/{containerName}/{blob}'} - - async def query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Query operation enables users to select/project on blob data by - providing simple query expressions. - - :param query_request: the query request - :type query_request: ~azure.storage.blob.models.QueryRequest - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "query" - - # Construct URL - url = self.query.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - 
header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 
'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', 
response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} - - async def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Tags operation enables users to get the tags associated with a - blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. 
- :type version_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlobTags or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tags" - - # Construct URL - url = self.get_tags.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlobTags', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} - - async def set_tags(self, timeout=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. 
It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param tags: Blob tags - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tags" - - # Construct URL - url = self.set_tags.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags') - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tags.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_block_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_block_blob_operations_async.py deleted file mode 100644 index e069370..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_block_blob_operations_async.py +++ /dev/null @@ -1,833 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class BlockBlobOperations: - """BlockBlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "BlockBlob" - - async def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Block Blob operation updates the content of an existing - block blob. Updating an existing block blob overwrites any existing - metadata on the blob. Partial updates are not supported with Put Blob; - the content of the existing blob is overwritten with the content of the - new blob. To perform a partial update of the content of a block blob, - use the Put Block List operation. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. 
If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if 
modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.upload.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = 
self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload.metadata = {'url': '/{containerName}/{blob}'} - - async def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, *, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data - :type body: Generator - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. 
- :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - - comp = "block" - - # Construct URL - url = self.stage_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = 
self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block.metadata = {'url': '/{containerName}/{blob}'} - - async def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob where the contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "block" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. 
In order to be written as part of a - blob, a block must have been successfully written to the server in a - prior Put Block operation. You can call Put Block List to update a blob - by uploading only those blocks that have changed, then committing the - new and existing blocks together. You can do this by specifying whether - to commit a block from the committed block list or from the uncommitted - block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "blocklist" - - # Construct URL - url = self.commit_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; 
charset=utf-8' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - body_content = self._serialize.body(blocks, 'BlockLookupList') - - # Construct and send request - request = 
self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} - - async def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param list_type: Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlockList or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "blocklist" - - # Construct URL - url = self.get_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlockList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, 
deserialized, header_dict) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_container_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_container_operations_async.py deleted file mode 100644 index b7e1eb8..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_container_operations_async.py +++ /dev/null @@ -1,1400 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ContainerOperations: - """ContainerOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - async def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, *, cls=None, **kwargs): - """creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param container_cpk_scope_info: Additional parameters for the - operation - :type container_cpk_scope_info: - ~azure.storage.blob.models.ContainerCpkScopeInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - default_encryption_scope = None - if container_cpk_scope_info is not None: - default_encryption_scope = container_cpk_scope_info.default_encryption_scope - prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - - restype = "container" - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str') - if prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}'} - - async def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """returns all user-defined metadata and system properties for 
the - specified container. The data returned does not include the container's - list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')), - 'x-ms-has-legal-hold': self._deserialize('bool', 
response.headers.get('x-ms-has-legal-hold')), - 'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')), - 'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}'} - - async def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """operation marks the specified container for deletion. The container and - any blobs contained within it are later deleted during garbage - collection. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}'} - - async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """operation sets one or more user-defined name-value pairs for the - specified container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - - restype = "container" - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}'} - - async def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """gets the permissions for the specified container. The permissions - indicate whether container data may be accessed publicly. 
- - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} - - async def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, 
lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """sets the permissions for the specified container. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param container_acl: the acls for the container - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 
'SignedIdentifiers', 'wrapped': True}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{containerName}'} - - async def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, *, cls=None, **kwargs): - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param deleted_container_name: Optional. Version 2019-12-12 and - later. Specifies the name of the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and - later. Specifies the version of the deleted container to restore. 
- :type deleted_container_version: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "undelete" - - # Construct URL - url = self.restore.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - restore.metadata = {'url': '/{containerName}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}'} - - async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - 
if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}'} - - async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}'} - - async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}'} - - async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, 
modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - 
raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}'} - - async def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsFlatSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} - - async def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. 
- - :param delimiter: When the request includes this parameter, the - operation returns a BlobPrefix element in the response body that acts - as a placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character. The - delimiter may be a single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsHierarchySegmentResponse or the result of - cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . 
-
- :param callable cls: A custom type or function that will be passed the
- direct response
- :return: None or the result of cls(response)
- :rtype: None
- :raises:
- :class:`StorageErrorException`
- """
- error_map = kwargs.pop('error_map', None)
- restype = "account"
- comp = "properties"
-
- # Construct URL
- url = self.get_account_info.metadata['url']
- path_format_arguments = {
- 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True)
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {}
- query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
- query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-
- # Construct headers
- header_parameters = {}
- header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-
- # Construct and send request
- request = self._client.get(url, query_parameters, header_parameters)
- pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- raise models.StorageErrorException(response, self._deserialize)
-
- if cls:
- response_headers = {
- 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
- 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
- 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
- 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
- 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')),
- 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')),
- 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')),
- }
- return cls(response, None, response_headers)
- get_account_info.metadata = {'url': '/{containerName}'}
diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py
deleted file mode 100644
index 590c0f8..0000000
--- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py
+++ /dev/null
@@ -1,739 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class DirectoryOperations:
- """DirectoryOperations async operations.
-
- You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
-
- :param client: Client for service requests.
- :param config: Configuration of service client.
- :param serializer: An object model serializer.
- :param deserializer: An object model deserializer.
- :ivar resource: . Constant value: "directory".
- """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "directory" - - async def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Create a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - - # Construct headers - header_parameters = {} - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: 
- header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - async def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Rename a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. 
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct 
headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 
'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - async def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the - directory will be deleted. If "false" and the directory is non-empty, - an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str')
-
- # Construct headers
- header_parameters = {}
- if request_id is not None:
- header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str')
- header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
- if lease_id is not None:
- header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
- if if_match is not None:
- header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
- if if_none_match is not None:
- header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
- if if_modified_since is not None:
- header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123')
- if if_unmodified_since is not None:
- header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123')
-
- # Construct and send request
- request = self._client.head(url, query_parameters, header_parameters)
- pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
- response = pipeline_response.http_response
-
- if response.status_code not in [200]:
- map_error(status_code=response.status_code, response=response, error_map=error_map)
- raise models.DataLakeStorageErrorException(response, self._deserialize)
-
- if cls:
- response_headers = {
- 'Date': self._deserialize('rfc-1123', response.headers.get('Date')),
- 'ETag': self._deserialize('str', response.headers.get('ETag')),
- 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')),
- 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')),
- 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')),
- 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')),
- 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')),
- 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')),
- 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')),
- 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')),
- }
- return cls(response, None, response_headers)
- get_access_control.metadata = {'url': '/{filesystem}/{path}'}
diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_page_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_page_blob_operations_async.py
deleted file mode 100644
index c54a27c..0000000
--- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_page_blob_operations_async.py
+++ /dev/null
@@ -1,1399 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from azure.core.exceptions import map_error
-
-from ... import models
-
-
-class PageBlobOperations:
- """PageBlobOperations async operations.
- - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "PageBlob" - - async def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80' - :type tier: str or - ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] 
= self._serialize.header("tier", tier, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - async def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if 
transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': 
self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages.metadata = {'url': '/{containerName}/{blob}'} - - async def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
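# --- Editor's illustrative sketch (not part of the deleted source) ----------
# Minimal example of driving the async upload_pages operation defined above.
# It assumes a generated async client whose page blob operations group is
# exposed as `client.page_blob`; that attribute name is an assumption, since
# the client class itself is not part of this diff.
async def write_first_pages(client, data: bytes):
    # Pages must be 512-byte aligned; x-ms-range is inclusive ("bytes=0-511").
    assert len(data) % 512 == 0
    await client.page_blob.upload_pages(
        body=data,                                  # raw page bytes for the request body
        content_length=len(data),                   # sets the Content-Length header
        range="bytes=0-{}".format(len(data) - 1),   # destination page range
    )
# -----------------------------------------------------------------------------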
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "page" - page_write = "clear" - - # Construct URL - url = self.clear_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = 
self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - clear_pages.metadata = {'url': '/{containerName}/{blob}'} - - async def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The - length of this range should match the ContentLength header and - x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be - written. The range should be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
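# --- Editor's illustrative sketch (not part of the deleted source) ----------
# Minimal example for the async clear_pages operation defined above. Clear
# Pages sends no body, so content_length is 0; the `page_blob` attribute name
# on the generated client is an assumption.
async def clear_first_page(client):
    await client.page_blob.clear_pages(
        content_length=0,        # Clear Pages always sends an empty body
        range="bytes=0-511",     # 512-byte aligned page range to clear
    )
# -----------------------------------------------------------------------------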
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is 
not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if 
if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Page Ranges operation returns the list of valid page ranges for - a page blob or snapshot of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. 
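# --- Editor's illustrative sketch (not part of the deleted source) ----------
# Sketch for upload_pages_from_url defined above: the page content is read
# server-side from source_url, so the request body is empty (content_length=0)
# and the source and destination ranges must cover the same number of bytes.
# The `page_blob` attribute and the SAS-authenticated source URL are assumptions.
async def copy_page_from_url(client, source_url_with_sas: str):
    await client.page_blob.upload_pages_from_url(
        source_url=source_url_with_sas,
        source_range="bytes=0-511",   # bytes read from the copy source
        content_length=0,             # no request body for this operation
        range="bytes=0-511",          # destination range on the page blob
    )
# -----------------------------------------------------------------------------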
- :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} - - async def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Page Ranges Diff operation returns the list of valid page - ranges for a page blob that were changed between target blob and - previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The - prevsnapshot parameter is a DateTime value that specifies that the - response will contain only pages that were changed between target blob - and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot - specified by prevsnapshot is the older of the two. Note that - incremental snapshots are currently supported only for blobs created - on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in - service versions 2019-04-19 and after and specifies the URL of a - previous snapshot of the target blob. The response will only contain - pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
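# --- Editor's illustrative sketch (not part of the deleted source) ----------
# Sketch for get_page_ranges defined above: the XML response is deserialized
# into a PageList model. The `page_blob` attribute name and the PageList field
# layout (page_range entries with start/end) are assumptions based on the
# model names referenced in the docstring.
async def print_valid_ranges(client):
    page_list = await client.page_blob.get_page_ranges(
        range="bytes=0-1048575",   # optional: restrict the listing to the first 1 MiB
    )
    for page in page_list.page_range or []:
        print(page.start, page.end)
# -----------------------------------------------------------------------------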
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and 
send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} - - async def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
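# --- Editor's illustrative sketch (not part of the deleted source) ----------
# Sketch for get_page_ranges_diff defined above: pass either prevsnapshot (an
# earlier snapshot's DateTime value) or, on service versions 2019-04-19 and
# later, prev_snapshot_url; only pages changed since that snapshot are
# returned. The `page_blob` attribute name and the PageList field layout
# (page_range / clear_range) are assumptions.
async def print_changed_ranges(client, previous_snapshot: str):
    diff = await client.page_blob.get_page_ranges_diff(
        prevsnapshot=previous_snapshot,
    )
    for page in diff.page_range or []:        # pages updated since the snapshot
        print("changed", page.start, page.end)
    for cleared in diff.clear_range or []:    # pages cleared since the snapshot
        print("cleared", cleared.start, cleared.end)
# -----------------------------------------------------------------------------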
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.resize.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') 
- if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - resize.metadata = {'url': '/{containerName}/{blob}'} - - async def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the - x-ms-blob-sequence-number header is set for the request. This property - applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. Possible values include: - 'max', 'update', 'increment' - :type sequence_number_action: str or - ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
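# --- Editor's illustrative sketch (not part of the deleted source) ----------
# Sketch for resize defined above: the new blob_content_length must be aligned
# to a 512-byte boundary (up to 1 TB). The `page_blob` attribute name is an
# assumption.
async def grow_blob_to_one_mib(client):
    await client.page_blob.resize(blob_content_length=1024 * 1024)
# -----------------------------------------------------------------------------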
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.update_sequence_number.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} - - async def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Copy Incremental operation copies a snapshot of the source page - blob to a destination page blob. The snapshot is copied such that only - the differential changes between the previously copied snapshot are - transferred to the destination. The copied snapshots are complete - copies of the original snapshot and can be read or copied from as - usual. This API is supported since REST version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
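# --- Editor's illustrative sketch (not part of the deleted source) ----------
# Sketch for update_sequence_number defined above: sequence_number_action is
# one of 'max', 'update' or 'increment'; blob_sequence_number is typically
# supplied for 'max' and 'update' and omitted for 'increment'. The `page_blob`
# attribute name is an assumption.
async def bump_sequence_number(client):
    await client.page_blob.update_sequence_number(
        sequence_number_action="update",
        blob_sequence_number=7,
    )
# -----------------------------------------------------------------------------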
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "incrementalcopy" - - # Construct URL - url = self.copy_incremental.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py deleted file mode 100644 index e12c2b9..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py +++ /dev/null @@ -1,664 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - async def set_properties(self, storage_service_properties, timeout=None, request_id=None, *, cls=None, **kwargs): - """Sets properties for a storage account's Blob service endpoint, - including properties for Storage Analytics and CORS (Cross-Origin - Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """gets the properties of a storage account's Blob service, including - properties for Storage Analytics and CORS (Cross-Origin Resource - Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """Retrieves statistics related to replication for the Blob service. It is - only available on the secondary location endpoint when read-access - geo-redundant replication is enabled for the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceStats or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceStats', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/'} - - async def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """The List Containers Segment operation returns a list of the containers - under the specified account. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. 
Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's - metadata be returned as part of the response body. - :type include: list[str or - ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListContainersSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_containers_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListContainersSegmentResponse', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return 
cls(response, deserialized, header_dict) - - return deserialized - list_containers_segment.metadata = {'url': '/'} - - async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, *, cls=None, **kwargs): - """Retrieves a user delegation key for the Blob service. This is only a - valid operation when using bearer token authentication. - - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: UserDelegationKey or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "userdelegationkey" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(key_info, 'KeyInfo') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UserDelegationKey', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . 
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/'} - - async def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, *, cls=None, **kwargs): - """The Batch operation allows multiple API calls to be embedded into a - single HTTP request. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must - be multipart/mixed with a batch boundary. Example header value: - multipart/mixed; boundary=batch_ - :type multipart_content_type: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "batch" - - # Construct URL - url = self.submit_batch.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - submit_batch.metadata = {'url': '/'} - - async def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, *, cls=None, **kwargs): - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param where: Filters the results to return only to return only blobs - whose tags match the specified expression. 
- :type where: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: FilterBlobSegment or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "blobs" - - # Construct URL - url = self.filter_blobs.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FilterBlobSegment', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', 
response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - filter_blobs.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/__init__.py deleted file mode 100644 index 6709caf..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/__init__.py +++ /dev/null @@ -1,223 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import AppendPositionAccessConditions - from ._models_py3 import BlobFlatListSegment - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobHTTPHeaders - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobMetadata - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import BlobTag - from ._models_py3 import BlobTags - from ._models_py3 import Block - from ._models_py3 import BlockList - from ._models_py3 import BlockLookupList - from ._models_py3 import ClearRange - from ._models_py3 import ContainerCpkScopeInfo - from ._models_py3 import ContainerItem - from ._models_py3 import ContainerProperties - from ._models_py3 import CorsRule - from ._models_py3 import CpkInfo - from ._models_py3 import CpkScopeInfo - from ._models_py3 import DataLakeStorageError, DataLakeStorageErrorException - from ._models_py3 import DataLakeStorageErrorError - from ._models_py3 import DelimitedTextConfiguration - from ._models_py3 import DirectoryHttpHeaders - from ._models_py3 import FilterBlobItem - from ._models_py3 import FilterBlobSegment - from ._models_py3 import GeoReplication - from ._models_py3 import JsonTextConfiguration - from ._models_py3 import KeyInfo - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsFlatSegmentResponse - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ListContainersSegmentResponse - from ._models_py3 import Logging - from ._models_py3 import Metrics - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import PageList - from ._models_py3 import PageRange - from ._models_py3 import QueryFormat - from ._models_py3 import QueryRequest - from ._models_py3 import QuerySerialization - from ._models_py3 import RetentionPolicy - from ._models_py3 import SequenceNumberAccessConditions - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StaticWebsite - from ._models_py3 import StorageError, StorageErrorException - from ._models_py3 import StorageServiceProperties - from ._models_py3 import StorageServiceStats - from ._models_py3 import UserDelegationKey -except (SyntaxError, ImportError): - from ._models import AccessPolicy - from ._models import AppendPositionAccessConditions - from ._models import BlobFlatListSegment - from ._models import BlobHierarchyListSegment - from ._models import 
BlobHTTPHeaders - from ._models import BlobItemInternal - from ._models import BlobMetadata - from ._models import BlobPrefix - from ._models import BlobPropertiesInternal - from ._models import BlobTag - from ._models import BlobTags - from ._models import Block - from ._models import BlockList - from ._models import BlockLookupList - from ._models import ClearRange - from ._models import ContainerCpkScopeInfo - from ._models import ContainerItem - from ._models import ContainerProperties - from ._models import CorsRule - from ._models import CpkInfo - from ._models import CpkScopeInfo - from ._models import DataLakeStorageError, DataLakeStorageErrorException - from ._models import DataLakeStorageErrorError - from ._models import DelimitedTextConfiguration - from ._models import DirectoryHttpHeaders - from ._models import FilterBlobItem - from ._models import FilterBlobSegment - from ._models import GeoReplication - from ._models import JsonTextConfiguration - from ._models import KeyInfo - from ._models import LeaseAccessConditions - from ._models import ListBlobsFlatSegmentResponse - from ._models import ListBlobsHierarchySegmentResponse - from ._models import ListContainersSegmentResponse - from ._models import Logging - from ._models import Metrics - from ._models import ModifiedAccessConditions - from ._models import PageList - from ._models import PageRange - from ._models import QueryFormat - from ._models import QueryRequest - from ._models import QuerySerialization - from ._models import RetentionPolicy - from ._models import SequenceNumberAccessConditions - from ._models import SignedIdentifier - from ._models import SourceModifiedAccessConditions - from ._models import StaticWebsite - from ._models import StorageError, StorageErrorException - from ._models import StorageServiceProperties - from ._models import StorageServiceStats - from ._models import UserDelegationKey -from ._azure_blob_storage_enums import ( - AccessTier, - AccessTierOptional, - AccessTierRequired, - AccountKind, - ArchiveStatus, - BlobExpiryOptions, - BlobType, - BlockListType, - CopyStatusType, - DeleteSnapshotsOptionType, - EncryptionAlgorithmType, - GeoReplicationStatusType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListBlobsIncludeItem, - ListContainersIncludeType, - PathRenameMode, - PremiumPageBlobAccessTier, - PublicAccessType, - QueryFormatType, - RehydratePriority, - SequenceNumberActionType, - SkuName, - StorageErrorCode, - SyncCopyStatusType, -) - -__all__ = [ - 'AccessPolicy', - 'AppendPositionAccessConditions', - 'BlobFlatListSegment', - 'BlobHierarchyListSegment', - 'BlobHTTPHeaders', - 'BlobItemInternal', - 'BlobMetadata', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'BlobTag', - 'BlobTags', - 'Block', - 'BlockList', - 'BlockLookupList', - 'ClearRange', - 'ContainerCpkScopeInfo', - 'ContainerItem', - 'ContainerProperties', - 'CorsRule', - 'CpkInfo', - 'CpkScopeInfo', - 'DataLakeStorageError', 'DataLakeStorageErrorException', - 'DataLakeStorageErrorError', - 'DelimitedTextConfiguration', - 'DirectoryHttpHeaders', - 'FilterBlobItem', - 'FilterBlobSegment', - 'GeoReplication', - 'JsonTextConfiguration', - 'KeyInfo', - 'LeaseAccessConditions', - 'ListBlobsFlatSegmentResponse', - 'ListBlobsHierarchySegmentResponse', - 'ListContainersSegmentResponse', - 'Logging', - 'Metrics', - 'ModifiedAccessConditions', - 'PageList', - 'PageRange', - 'QueryFormat', - 'QueryRequest', - 'QuerySerialization', - 'RetentionPolicy', - 'SequenceNumberAccessConditions', - 'SignedIdentifier', - 
'SourceModifiedAccessConditions', - 'StaticWebsite', - 'StorageError', 'StorageErrorException', - 'StorageServiceProperties', - 'StorageServiceStats', - 'UserDelegationKey', - 'PublicAccessType', - 'CopyStatusType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'AccessTier', - 'ArchiveStatus', - 'BlobType', - 'RehydratePriority', - 'StorageErrorCode', - 'GeoReplicationStatusType', - 'QueryFormatType', - 'AccessTierRequired', - 'AccessTierOptional', - 'PremiumPageBlobAccessTier', - 'BlobExpiryOptions', - 'BlockListType', - 'DeleteSnapshotsOptionType', - 'EncryptionAlgorithmType', - 'ListBlobsIncludeItem', - 'ListContainersIncludeType', - 'PathRenameMode', - 'SequenceNumberActionType', - 'SkuName', - 'AccountKind', - 'SyncCopyStatusType', -] diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_azure_blob_storage_enums.py deleted file mode 100644 index d89e858..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_azure_blob_storage_enums.py +++ /dev/null @@ -1,342 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum - - -class PublicAccessType(str, Enum): - - container = "container" - blob = "blob" - - -class CopyStatusType(str, Enum): - - pending = "pending" - success = "success" - aborted = "aborted" - failed = "failed" - - -class LeaseDurationType(str, Enum): - - infinite = "infinite" - fixed = "fixed" - - -class LeaseStateType(str, Enum): - - available = "available" - leased = "leased" - expired = "expired" - breaking = "breaking" - broken = "broken" - - -class LeaseStatusType(str, Enum): - - locked = "locked" - unlocked = "unlocked" - - -class AccessTier(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class ArchiveStatus(str, Enum): - - rehydrate_pending_to_hot = "rehydrate-pending-to-hot" - rehydrate_pending_to_cool = "rehydrate-pending-to-cool" - - -class BlobType(str, Enum): - - block_blob = "BlockBlob" - page_blob = "PageBlob" - append_blob = "AppendBlob" - - -class RehydratePriority(str, Enum): - - high = "High" - standard = "Standard" - - -class StorageErrorCode(str, Enum): - - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - 
invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - 
lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_authentication_information = "NoAuthenticationInformation" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch" - authorization_protocol_mismatch = "AuthorizationProtocolMismatch" - authorization_permission_mismatch = "AuthorizationPermissionMismatch" - authorization_service_mismatch = "AuthorizationServiceMismatch" - authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch" - - -class GeoReplicationStatusType(str, Enum): - - live = "live" - bootstrap = "bootstrap" - unavailable = "unavailable" - - -class QueryFormatType(str, Enum): - - delimited = "delimited" - json = "json" - - -class AccessTierRequired(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class AccessTierOptional(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class PremiumPageBlobAccessTier(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - - -class BlobExpiryOptions(str, Enum): - - never_expire = "NeverExpire" - relative_to_creation = "RelativeToCreation" - relative_to_now = "RelativeToNow" - absolute = "Absolute" - - -class BlockListType(str, Enum): - - committed = "committed" - uncommitted = "uncommitted" - all = "all" - - -class DeleteSnapshotsOptionType(str, Enum): - - include = "include" - only = "only" - - -class EncryptionAlgorithmType(str, Enum): - - aes256 = "AES256" - - -class ListBlobsIncludeItem(str, Enum): - - copy = "copy" - deleted = "deleted" - metadata = "metadata" - snapshots = "snapshots" - uncommittedblobs = "uncommittedblobs" - versions = "versions" - tags = "tags" - - -class ListContainersIncludeType(str, Enum): - - metadata = "metadata" - deleted = "deleted" - - -class PathRenameMode(str, 
Enum): - - legacy = "legacy" - posix = "posix" - - -class SequenceNumberActionType(str, Enum): - - max = "max" - update = "update" - increment = "increment" - - -class SkuName(str, Enum): - - standard_lrs = "Standard_LRS" - standard_grs = "Standard_GRS" - standard_ragrs = "Standard_RAGRS" - standard_zrs = "Standard_ZRS" - premium_lrs = "Premium_LRS" - - -class AccountKind(str, Enum): - - storage = "Storage" - blob_storage = "BlobStorage" - storage_v2 = "StorageV2" - file_storage = "FileStorage" - block_blob_storage = "BlockBlobStorage" - - -class SyncCopyStatusType(str, Enum): - - success = "success" diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models.py deleted file mode 100644 index acb79c0..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models.py +++ /dev/null @@ -1,1939 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: the date-time the policy is active - :type start: str - :param expiry: the date-time the policy expires - :type expiry: str - :param permission: the permissions for the acl policy - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class AppendPositionAccessConditions(Model): - """Additional parameters for a set of operations, such as: - AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal. - - :param max_size: Optional conditional header. The max length in bytes - permitted for the append blob. If the Append Block operation would cause - the blob to exceed that limit or if the blob size is already greater than - the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the - Append Block operation. A number indicating the byte offset to compare. - Append Block will succeed only if the append position is equal to this - number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition - Failed). 
- :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}}, - 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = kwargs.get('max_size', None) - self.append_position = kwargs.get('append_position', None) - - -class BlobFlatListSegment(Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, **kwargs): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = kwargs.get('blob_items', None) - - -class BlobHierarchyListSegment(Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, **kwargs): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs.get('blob_items', None) - - -class BlobHTTPHeaders(Model): - """Additional parameters for a set of operations. - - :param blob_cache_control: Optional. Sets the blob's cache control. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note - that this hash is not validated, as the hashes for the individual blocks - were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's - Content-Disposition header. 
- :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}}, - 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}}, - 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}}, - 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}}, - 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}}, - 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = kwargs.get('blob_cache_control', None) - self.blob_content_type = kwargs.get('blob_content_type', None) - self.blob_content_md5 = kwargs.get('blob_content_md5', None) - self.blob_content_encoding = kwargs.get('blob_content_encoding', None) - self.blob_content_language = kwargs.get('blob_content_language', None) - self.blob_content_disposition = kwargs.get('blob_content_disposition', None) - - -class BlobItemInternal(Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. - :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: - :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, **kwargs): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.deleted = kwargs.get('deleted', None) - self.snapshot = kwargs.get('snapshot', None) - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - self.blob_tags = kwargs.get('blob_tags', None) - self.object_replication_metadata = kwargs.get('object_replication_metadata', None) - - -class BlobMetadata(Model): - """BlobMetadata. 
- - :param additional_properties: Unmatched properties from the message are - deserialized this collection - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__(self, **kwargs): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = kwargs.get('additional_properties', None) - self.encrypted = kwargs.get('encrypted', None) - - -class BlobPrefix(Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - - -class BlobPropertiesInternal(Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: datetime - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: 'BlockBlob', 'PageBlob', - 'AppendBlob' - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: 'pending', 'success', - 'aborted', 'failed' - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15', - 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param 
archive_status: Possible values include: - 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool' - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the - blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: Possible values include: 'High', 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}}, - 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}}, - 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}}, - 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}}, - 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}}, - 
'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, - 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__(self, **kwargs): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.blob_type = kwargs.get('blob_type', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_status = kwargs.get('copy_status', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.archive_status = kwargs.get('archive_status', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.rehydrate_priority = kwargs.get('rehydrate_priority', None) - - -class BlobTag(Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. 
- :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__(self, **kwargs): - super(BlobTag, self).__init__(**kwargs) - self.key = kwargs.get('key', None) - self.value = kwargs.get('value', None) - - -class BlobTags(Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__(self, **kwargs): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = kwargs.get('blob_tag_set', None) - - -class Block(Model): - """Represents a single block in a block blob. It describes the block's ID and - size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Block, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.size = kwargs.get('size', None) - - -class BlockList(Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = kwargs.get('committed_blocks', None) - self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) - - -class BlockLookupList(Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__(self, **kwargs): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = kwargs.get('committed', None) - self.uncommitted = kwargs.get('uncommitted', None) - self.latest = kwargs.get('latest', None) - - -class ClearRange(Model): - """ClearRange. 
- - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__(self, **kwargs): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class ContainerCpkScopeInfo(Model): - """Additional parameters for create operation. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. - Specifies the default encryption scope to set on the container and use for - all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 - and newer. If true, prevents any request from specifying a different - encryption scope than the scope set on the container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}}, - 'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - - -class ContainerItem(Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__(self, **kwargs): - super(ContainerItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - - -class ContainerProperties(Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: 'container', 'blob' - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.public_access = kwargs.get('public_access', None) - self.has_immutability_policy = kwargs.get('has_immutability_policy', None) - self.has_legal_hold = kwargs.get('has_legal_hold', None) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. 
Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs.get('allowed_origins', None) - self.allowed_methods = kwargs.get('allowed_methods', None) - self.allowed_headers = kwargs.get('allowed_headers', None) - self.exposed_headers = kwargs.get('exposed_headers', None) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) - - -class CpkInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_key: Optional. Specifies the encryption key to use to - encrypt the data provided in the request. If not specified, encryption is - performed with the root account encryption key. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption - key. Must be provided if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption - key hash. Currently, the only accepted value is "AES256". Must be provided - if the x-ms-encryption-key header is provided. 
Possible values include: - 'AES256' - :type encryption_algorithm: str or - ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}}, - 'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}}, - 'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = kwargs.get('encryption_key', None) - self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) - self.encryption_algorithm = kwargs.get('encryption_algorithm', None) - - -class CpkScopeInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_scope: Optional. Version 2019-07-07 and later. - Specifies the name of the encryption scope to use to encrypt the data - provided in the request. If not specified, encryption is performed with - the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = kwargs.get('encryption_scope', None) - - -class DataLakeStorageError(Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: - ~azure.storage.blob.models.DataLakeStorageErrorError - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None) - - -class DataLakeStorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'DataLakeStorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'DataLakeStorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(DataLakeStorageErrorException, self).__init__(response=response) - - -class DataLakeStorageErrorError(Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}}, - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DataLakeStorageErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - - -class DelimitedTextConfiguration(Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator - :type column_separator: str - :param field_quote: Required. field quote - :type field_quote: str - :param record_separator: Required. 
record separator - :type record_separator: str - :param escape_char: Required. escape char - :type escape_char: str - :param headers_present: Required. has headers - :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__(self, **kwargs): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = kwargs.get('column_separator', None) - self.field_quote = kwargs.get('field_quote', None) - self.record_separator = kwargs.get('record_separator', None) - self.escape_char = kwargs.get('escape_char', None) - self.headers_present = kwargs.get('headers_present', None) - - -class DirectoryHttpHeaders(Model): - """Additional parameters for a set of operations, such as: Directory_create, - Directory_rename, Blob_rename. - - :param cache_control: Cache control for given resource - :type cache_control: str - :param content_type: Content type for given resource - :type content_type: str - :param content_encoding: Content encoding for given resource - :type content_encoding: str - :param content_language: Content language for given resource - :type content_language: str - :param content_disposition: Content disposition for given resource - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}}, - 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}}, - 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}}, - 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}}, - 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - - -class FilterBlobItem(Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tag_value: Required. 
- :type tag_value: str - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - 'tag_value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}}, - 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, **kwargs): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.container_name = kwargs.get('container_name', None) - self.tag_value = kwargs.get('tag_value', None) - - -class FilterBlobSegment(Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.where = kwargs.get('where', None) - self.blobs = kwargs.get('blobs', None) - self.next_marker = kwargs.get('next_marker', None) - - -class GeoReplication(Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible - values include: 'live', 'bootstrap', 'unavailable' - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All - primary writes preceding this value are guaranteed to be available for - read operations at the secondary. Primary writes after this point in time - may or may not be available for reads. - :type last_sync_time: datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(GeoReplication, self).__init__(**kwargs) - self.status = kwargs.get('status', None) - self.last_sync_time = kwargs.get('last_sync_time', None) - - -class JsonTextConfiguration(Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. 
record separator - :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__(self, **kwargs): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = kwargs.get('record_separator', None) - - -class KeyInfo(Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC - time - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC - time - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(KeyInfo, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsFlatSegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.container_name = kwargs.get('container_name', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListBlobsHierarchySegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.container_name = kwargs.get('container_name', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListContainersSegmentResponse(Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.container_items = kwargs.get('container_items', None) - self.next_marker = kwargs.get('next_marker', None) - - -class Logging(Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. 
Indicates whether all delete requests should be - logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be - logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be - logged. - :type write: bool - :param retention_policy: Required. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, - 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, - 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Logging, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.delete = kwargs.get('delete', None) - self.read = kwargs.get('read', None) - self.write = kwargs.get('write', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class Metrics(Model): - """a summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs.get('enabled', None) - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class ModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param if_modified_since: Specify this header value to operate only on a - blob if it has been modified since the specified date/time. - :type if_modified_since: datetime - :param if_unmodified_since: Specify this header value to operate only on a - blob if it has not been modified since the specified date/time. - :type if_unmodified_since: datetime - :param if_match: Specify an ETag value to operate only on blobs with a - matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs - without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on - blobs with a matching value. 
- :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}}, - 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}}, - 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}}, - 'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_tags = kwargs.get('if_tags', None) - - -class PageList(Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(PageList, self).__init__(**kwargs) - self.page_range = kwargs.get('page_range', None) - self.clear_range = kwargs.get('clear_range', None) - - -class PageRange(Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__(self, **kwargs): - super(PageRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class QueryFormat(Model): - """QueryFormat. - - :param type: Possible values include: 'delimited', 'json' - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: - :type delimited_text_configuration: - ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: - :type json_text_configuration: - ~azure.storage.blob.models.JsonTextConfiguration - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(QueryFormat, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) - self.json_text_configuration = kwargs.get('json_text_configuration', None) - - -class QueryRequest(Model): - """the quick query body. - - Variables are only populated by the server, and will be ignored when - sending a request. 
- - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL" . - :vartype query_type: str - :param expression: Required. a query statement - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__(self, **kwargs): - super(QueryRequest, self).__init__(**kwargs) - self.expression = kwargs.get('expression', None) - self.input_serialization = kwargs.get('input_serialization', None) - self.output_serialization = kwargs.get('output_serialization', None) - - -class QuerySerialization(Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. - :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(QuerySerialization, self).__init__(**kwargs) - self.format = kwargs.get('format', None) - - -class RetentionPolicy(Model): - """the retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the storage service - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.days = kwargs.get('days', None) - - -class SequenceNumberAccessConditions(Model): - """Additional parameters for a set of operations, such as: - PageBlob_upload_pages, PageBlob_clear_pages, - PageBlob_upload_pages_from_url. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value - to operate only on a blob if it has a sequence number less than or equal - to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate - only on a blob if it has a sequence number less than the specified. 
- :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate - only on a blob if it has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}}, - 'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}}, - 'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None) - self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None) - self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None) - - -class SignedIdentifier(Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id - :type id: str - :param access_policy: - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__(self, **kwargs): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param source_if_modified_since: Specify this header value to operate only - on a blob if it has been modified since the specified date/time. - :type source_if_modified_since: datetime - :param source_if_unmodified_since: Specify this header value to operate - only on a blob if it has not been modified since the specified date/time. - :type source_if_unmodified_since: datetime - :param source_if_match: Specify an ETag value to operate only on blobs - with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on - blobs without a matching value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate - only on blobs with a matching value. 
- :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}}, - 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}}, - 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}}, - 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}}, - 'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_tags = kwargs.get('source_if_tags', None) - - -class StaticWebsite(Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a - static website - :type enabled: bool - :param index_document: The default name of the index page under each - directory - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index - page - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.index_document = kwargs.get('index_document', None) - self.error_document404_path = kwargs.get('error_document404_path', None) - self.default_index_document_path = kwargs.get('default_index_document_path', None) - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage Service Properties. 
- - :param logging: - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to - the Blob service if an incoming request's version is not specified. - Possible values include version 2008-10-27 and all more recent versions - :type default_service_version: str - :param delete_retention_policy: - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = kwargs.get('logging', None) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.default_service_version = kwargs.get('default_service_version', None) - self.delete_retention_policy = kwargs.get('delete_retention_policy', None) - self.static_website = kwargs.get('static_website', None) - - -class StorageServiceStats(Model): - """Stats for the storage service. - - :param geo_replication: - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = kwargs.get('geo_replication', None) - - -class UserDelegationKey(Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID - format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID - format - :type signed_tid: str - :param signed_start: Required. The date-time the key is active - :type signed_start: datetime - :param signed_expiry: Required. The date-time the key expires - :type signed_expiry: datetime - :param signed_service: Required. Abbreviation of the Azure Storage service - that accepts the key - :type signed_service: str - :param signed_version: Required. The service version that created the key - :type signed_version: str - :param value: Required. 
The key as a base64 string - :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}}, - 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = kwargs.get('signed_oid', None) - self.signed_tid = kwargs.get('signed_tid', None) - self.signed_start = kwargs.get('signed_start', None) - self.signed_expiry = kwargs.get('signed_expiry', None) - self.signed_service = kwargs.get('signed_service', None) - self.signed_version = kwargs.get('signed_version', None) - self.value = kwargs.get('value', None) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models_py3.py deleted file mode 100644 index 36c3964..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/models/_models_py3.py +++ /dev/null @@ -1,1939 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: the date-time the policy is active - :type start: str - :param expiry: the date-time the policy expires - :type expiry: str - :param permission: the permissions for the acl policy - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None: - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class AppendPositionAccessConditions(Model): - """Additional parameters for a set of operations, such as: - AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal. - - :param max_size: Optional conditional header. The max length in bytes - permitted for the append blob. 
If the Append Block operation would cause - the blob to exceed that limit or if the blob size is already greater than - the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the - Append Block operation. A number indicating the byte offset to compare. - Append Block will succeed only if the append position is equal to this - number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}}, - 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}}, - } - _xml_map = { - } - - def __init__(self, *, max_size: int=None, append_position: int=None, **kwargs) -> None: - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = max_size - self.append_position = append_position - - -class BlobFlatListSegment(Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, *, blob_items, **kwargs) -> None: - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = blob_items - - -class BlobHierarchyListSegment(Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, *, blob_items, blob_prefixes=None, **kwargs) -> None: - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobHTTPHeaders(Model): - """Additional parameters for a set of operations. - - :param blob_cache_control: Optional. Sets the blob's cache control. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note - that this hash is not validated, as the hashes for the individual blocks - were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. - If specified, this property is stored with the blob and returned with a - read request. 
- :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's - Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}}, - 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}}, - 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}}, - 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}}, - 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}}, - 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, blob_cache_control: str=None, blob_content_type: str=None, blob_content_md5: bytearray=None, blob_content_encoding: str=None, blob_content_language: str=None, blob_content_disposition: str=None, **kwargs) -> None: - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = blob_cache_control - self.blob_content_type = blob_content_type - self.blob_content_md5 = blob_content_md5 - self.blob_content_encoding = blob_content_encoding - self.blob_content_language = blob_content_language - self.blob_content_disposition = blob_content_disposition - - -class BlobItemInternal(Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. 
- :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: - :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, *, name: str, deleted: bool, snapshot: str, properties, version_id: str=None, is_current_version: bool=None, metadata=None, blob_tags=None, object_replication_metadata=None, **kwargs) -> None: - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.metadata = metadata - self.blob_tags = blob_tags - self.object_replication_metadata = object_replication_metadata - - -class BlobMetadata(Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are - deserialized this collection - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__(self, *, additional_properties=None, encrypted: str=None, **kwargs) -> None: - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.encrypted = encrypted - - -class BlobPrefix(Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - } - - def __init__(self, *, name: str, **kwargs) -> None: - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: datetime - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param content_length: Size in bytes - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: 'BlockBlob', 'PageBlob', - 'AppendBlob' - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: 'pending', 'success', - 'aborted', 'failed' - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15', - 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: - 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool' - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the - blob is encrypted. 
- :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: Possible values include: 'High', 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}}, - 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}}, - 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}}, - 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}}, - 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, - 
'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, - 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, tag_count: int=None, expires_on=None, is_sealed: bool=None, rehydrate_priority=None, **kwargs) -> None: - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.blob_type = blob_type - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.copy_id = copy_id - self.copy_status = copy_status - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_inferred = access_tier_inferred - self.archive_status = archive_status - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.rehydrate_priority = rehydrate_priority - - -class BlobTag(Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. 
- :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__(self, *, key: str, value: str, **kwargs) -> None: - super(BlobTag, self).__init__(**kwargs) - self.key = key - self.value = value - - -class BlobTags(Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__(self, *, blob_tag_set, **kwargs) -> None: - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = blob_tag_set - - -class Block(Model): - """Represents a single block in a block blob. It describes the block's ID and - size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}}, - } - _xml_map = { - } - - def __init__(self, *, name: str, size: int, **kwargs) -> None: - super(Block, self).__init__(**kwargs) - self.name = name - self.size = size - - -class BlockList(Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, *, committed_blocks=None, uncommitted_blocks=None, **kwargs) -> None: - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = committed_blocks - self.uncommitted_blocks = uncommitted_blocks - - -class BlockLookupList(Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__(self, *, committed=None, uncommitted=None, latest=None, **kwargs) -> None: - super(BlockLookupList, self).__init__(**kwargs) - self.committed = committed - self.uncommitted = uncommitted - self.latest = latest - - -class ClearRange(Model): - """ClearRange. 
- - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class ContainerCpkScopeInfo(Model): - """Additional parameters for create operation. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. - Specifies the default encryption scope to set on the container and use for - all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 - and newer. If true, prevents any request from specifying a different - encryption scope than the scope set on the container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}}, - 'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}}, - } - _xml_map = { - } - - def __init__(self, *, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, **kwargs) -> None: - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - - -class ContainerItem(Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__(self, *, name: str, properties, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None: - super(ContainerItem, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class ContainerProperties(Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: 'container', 'blob' - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - } - _xml_map = { - } - - def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=None, lease_duration=None, public_access=None, has_immutability_policy: bool=None, has_legal_hold: bool=None, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, deleted_time=None, remaining_retention_days: int=None, **kwargs) -> None: - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.public_access = public_access - self.has_immutability_policy = has_immutability_policy - self.has_legal_hold = has_legal_hold - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. 
Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class CpkInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_key: Optional. Specifies the encryption key to use to - encrypt the data provided in the request. If not specified, encryption is - performed with the root account encryption key. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption - key. Must be provided if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption - key hash. Currently, the only accepted value is "AES256". Must be provided - if the x-ms-encryption-key header is provided. 
Possible values include: - 'AES256' - :type encryption_algorithm: str or - ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}}, - 'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}}, - 'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}}, - } - _xml_map = { - } - - def __init__(self, *, encryption_key: str=None, encryption_key_sha256: str=None, encryption_algorithm=None, **kwargs) -> None: - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = encryption_key - self.encryption_key_sha256 = encryption_key_sha256 - self.encryption_algorithm = encryption_algorithm - - -class CpkScopeInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_scope: Optional. Version 2019-07-07 and later. - Specifies the name of the encryption scope to use to encrypt the data - provided in the request. If not specified, encryption is performed with - the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}}, - } - _xml_map = { - } - - def __init__(self, *, encryption_scope: str=None, **kwargs) -> None: - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = encryption_scope - - -class DataLakeStorageError(Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: - ~azure.storage.blob.models.DataLakeStorageErrorError - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}}, - } - _xml_map = { - } - - def __init__(self, *, data_lake_storage_error_details=None, **kwargs) -> None: - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = data_lake_storage_error_details - - -class DataLakeStorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'DataLakeStorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'DataLakeStorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(DataLakeStorageErrorException, self).__init__(response=response) - - -class DataLakeStorageErrorError(Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}}, - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None: - super(DataLakeStorageErrorError, self).__init__(**kwargs) - self.code = code - self.message = message - - -class DelimitedTextConfiguration(Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator - :type column_separator: str - :param field_quote: Required. 
field quote - :type field_quote: str - :param record_separator: Required. record separator - :type record_separator: str - :param escape_char: Required. escape char - :type escape_char: str - :param headers_present: Required. has headers - :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__(self, *, column_separator: str, field_quote: str, record_separator: str, escape_char: str, headers_present: bool, **kwargs) -> None: - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = column_separator - self.field_quote = field_quote - self.record_separator = record_separator - self.escape_char = escape_char - self.headers_present = headers_present - - -class DirectoryHttpHeaders(Model): - """Additional parameters for a set of operations, such as: Directory_create, - Directory_rename, Blob_rename. - - :param cache_control: Cache control for given resource - :type cache_control: str - :param content_type: Content type for given resource - :type content_type: str - :param content_encoding: Content encoding for given resource - :type content_encoding: str - :param content_language: Content language for given resource - :type content_language: str - :param content_disposition: Content disposition for given resource - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}}, - 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}}, - 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}}, - 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}}, - 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, cache_control: str=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, **kwargs) -> None: - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - - -class FilterBlobItem(Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tag_value: Required. 
- :type tag_value: str - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - 'tag_value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}}, - 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, *, name: str, container_name: str, tag_value: str, **kwargs) -> None: - super(FilterBlobItem, self).__init__(**kwargs) - self.name = name - self.container_name = container_name - self.tag_value = tag_value - - -class FilterBlobSegment(Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, where: str, blobs, next_marker: str=None, **kwargs) -> None: - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.where = where - self.blobs = blobs - self.next_marker = next_marker - - -class GeoReplication(Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible - values include: 'live', 'bootstrap', 'unavailable' - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All - primary writes preceding this value are guaranteed to be available for - read operations at the secondary. Primary writes after this point in time - may or may not be available for reads. - :type last_sync_time: datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, - } - _xml_map = { - } - - def __init__(self, *, status, last_sync_time, **kwargs) -> None: - super(GeoReplication, self).__init__(**kwargs) - self.status = status - self.last_sync_time = last_sync_time - - -class JsonTextConfiguration(Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. 
record separator - :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__(self, *, record_separator: str, **kwargs) -> None: - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = record_separator - - -class KeyInfo(Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC - time - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC - time - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str, expiry: str, **kwargs) -> None: - super(KeyInfo, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, *, lease_id: str=None, **kwargs) -> None: - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsFlatSegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None: - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListBlobsHierarchySegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, delimiter: str=None, next_marker: str=None, **kwargs) -> None: - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ListContainersSegmentResponse(Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_items, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None: - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.container_items = container_items - self.next_marker = next_marker - - -class Logging(Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. 
Indicates whether all delete requests should be - logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be - logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be - logged. - :type write: bool - :param retention_policy: Required. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, - 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, - 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None: - super(Logging, self).__init__(**kwargs) - self.version = version - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy - - -class Metrics(Model): - """a summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None: - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class ModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param if_modified_since: Specify this header value to operate only on a - blob if it has been modified since the specified date/time. - :type if_modified_since: datetime - :param if_unmodified_since: Specify this header value to operate only on a - blob if it has not been modified since the specified date/time. - :type if_unmodified_since: datetime - :param if_match: Specify an ETag value to operate only on blobs with a - matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs - without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on - blobs with a matching value. 
- :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}}, - 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}}, - 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}}, - 'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}}, - } - _xml_map = { - } - - def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, if_tags: str=None, **kwargs) -> None: - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - self.if_tags = if_tags - - -class PageList(Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}}, - } - _xml_map = { - } - - def __init__(self, *, page_range=None, clear_range=None, **kwargs) -> None: - super(PageList, self).__init__(**kwargs) - self.page_range = page_range - self.clear_range = clear_range - - -class PageRange(Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(PageRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class QueryFormat(Model): - """QueryFormat. - - :param type: Possible values include: 'delimited', 'json' - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: - :type delimited_text_configuration: - ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: - :type json_text_configuration: - ~azure.storage.blob.models.JsonTextConfiguration - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}}, - } - _xml_map = { - } - - def __init__(self, *, type=None, delimited_text_configuration=None, json_text_configuration=None, **kwargs) -> None: - super(QueryFormat, self).__init__(**kwargs) - self.type = type - self.delimited_text_configuration = delimited_text_configuration - self.json_text_configuration = json_text_configuration - - -class QueryRequest(Model): - """the quick query body. 
- - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL" . - :vartype query_type: str - :param expression: Required. a query statement - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__(self, *, expression: str, input_serialization=None, output_serialization=None, **kwargs) -> None: - super(QueryRequest, self).__init__(**kwargs) - self.expression = expression - self.input_serialization = input_serialization - self.output_serialization = output_serialization - - -class QuerySerialization(Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. - :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}}, - } - _xml_map = { - } - - def __init__(self, *, format, **kwargs) -> None: - super(QuerySerialization, self).__init__(**kwargs) - self.format = format - - -class RetentionPolicy(Model): - """the retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the storage service - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class SequenceNumberAccessConditions(Model): - """Additional parameters for a set of operations, such as: - PageBlob_upload_pages, PageBlob_clear_pages, - PageBlob_upload_pages_from_url. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value - to operate only on a blob if it has a sequence number less than or equal - to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate - only on a blob if it has a sequence number less than the specified. 
- :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate - only on a blob if it has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}}, - 'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}}, - 'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}}, - } - _xml_map = { - } - - def __init__(self, *, if_sequence_number_less_than_or_equal_to: int=None, if_sequence_number_less_than: int=None, if_sequence_number_equal_to: int=None, **kwargs) -> None: - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to - self.if_sequence_number_less_than = if_sequence_number_less_than - self.if_sequence_number_equal_to = if_sequence_number_equal_to - - -class SignedIdentifier(Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id - :type id: str - :param access_policy: - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param source_if_modified_since: Specify this header value to operate only - on a blob if it has been modified since the specified date/time. - :type source_if_modified_since: datetime - :param source_if_unmodified_since: Specify this header value to operate - only on a blob if it has not been modified since the specified date/time. - :type source_if_unmodified_since: datetime - :param source_if_match: Specify an ETag value to operate only on blobs - with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on - blobs without a matching value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate - only on blobs with a matching value. 
- :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}}, - 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}}, - 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}}, - 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}}, - 'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}}, - } - _xml_map = { - } - - def __init__(self, *, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match: str=None, source_if_none_match: str=None, source_if_tags: str=None, **kwargs) -> None: - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_tags = source_if_tags - - -class StaticWebsite(Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a - static website - :type enabled: bool - :param index_document: The default name of the index page under each - directory - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index - page - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, index_document: str=None, error_document404_path: str=None, default_index_document_path: str=None, **kwargs) -> None: - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = enabled - self.index_document = index_document - self.error_document404_path = error_document404_path - self.default_index_document_path = default_index_document_path - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, *, message: str=None, **kwargs) -> None: - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage Service Properties. 
- - :param logging: - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to - the Blob service if an incoming request's version is not specified. - Possible values include version 2008-10-27 and all more recent versions - :type default_service_version: str - :param delete_retention_policy: - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}}, - } - _xml_map = { - } - - def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, default_service_version: str=None, delete_retention_policy=None, static_website=None, **kwargs) -> None: - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = logging - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.default_service_version = default_service_version - self.delete_retention_policy = delete_retention_policy - self.static_website = static_website - - -class StorageServiceStats(Model): - """Stats for the storage service. - - :param geo_replication: - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, - } - _xml_map = { - } - - def __init__(self, *, geo_replication=None, **kwargs) -> None: - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = geo_replication - - -class UserDelegationKey(Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID - format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID - format - :type signed_tid: str - :param signed_start: Required. The date-time the key is active - :type signed_start: datetime - :param signed_expiry: Required. The date-time the key expires - :type signed_expiry: datetime - :param signed_service: Required. Abbreviation of the Azure Storage service - that accepts the key - :type signed_service: str - :param signed_version: Required. The service version that created the key - :type signed_version: str - :param value: Required. 
The key as a base64 string - :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}}, - 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - } - - def __init__(self, *, signed_oid: str, signed_tid: str, signed_start, signed_expiry, signed_service: str, signed_version: str, value: str, **kwargs) -> None: - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = signed_oid - self.signed_tid = signed_tid - self.signed_start = signed_start - self.signed_expiry = signed_expiry - self.signed_service = signed_service - self.signed_version = signed_version - self.value = value diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/__init__.py deleted file mode 100644 index 1ea0453..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_append_blob_operations.py deleted file mode 100644 index 000810a..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_append_blob_operations.py +++ /dev/null @@ -1,694 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class AppendBlobOperations(object): - """AppendBlobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "AppendBlob" - - def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = 
self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append - Block is supported only on version 2015-02-21 version or later. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "appendblock" - - # Construct URL - url = self.append_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': 
self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block.metadata = {'url': '/{containerName}/{blob}'} - - def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob where the contents are read from a source url. The - Append Block operation is permitted only if the blob was created with - x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "appendblock" - - # Construct URL - url = self.append_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = 
self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if 
source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def seal(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, append_position_access_conditions=None, cls=None, **kwargs): - """The Seal operation seals the Append Blob to make it read-only. Seal is - supported only on version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - - comp = "seal" - - # Construct URL - url = self.seal.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) 
- raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - seal.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_blob_operations.py deleted file mode 100644 index 9478016..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_blob_operations.py +++ /dev/null @@ -1,3062 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class BlobOperations(object): - """BlobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_requires_sync: . Constant value: "true". - :ivar x_ms_copy_action: . Constant value: "abort". - :ivar restype: . Constant value: "account". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_requires_sync = "true" - self.x_ms_copy_action = "abort" - self.restype = "account" - - def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Download operation reads or downloads a blob from the system, - including its metadata and properties. You can also call Download to - read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. 
- :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param range_get_content_md5: When set to true and specified together - with the Range, the service returns the MD5 hash for the range, as - long as the range is less than or equal to 4 MB in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified - together with the Range, the service returns the CRC64 hash for the - range, as long as the range is less than or equal to 4 MB in size. - :type range_get_content_crc64: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 
'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', 
response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 
'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} - - def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Properties operation returns all user-defined metadata, - standard HTTP properties, and system properties for the blob. It does - not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. 
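The ``download`` operation above boils down to a GET on the blob URL with optional ``snapshot``, ``versionid``, and ``timeout`` query parameters plus range and conditional headers; the service answers 200 for a full read or 206 for a range. A sketch under the same hypothetical endpoint and SAS assumptions as before::

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # hypothetical
    sas = "sv=...&sig=..."

    resp = requests.get(
        f"{blob_url}?{sas}",
        params={"timeout": 30},
        headers={
            "x-ms-version": "2019-12-12",
            "x-ms-range": "bytes=0-1023",            # optional partial read
            "x-ms-range-get-content-md5": "true",    # MD5 of the range (ranges up to 4 MB)
        },
    )
    resp.raise_for_status()                          # 200 (full blob) or 206 (range)
    print(resp.headers.get("ETag"), resp.headers.get("Content-Range"), len(resp.content))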
For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - 
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')), - 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': 
self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')), - 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), - 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), - 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}/{blob}'} - - def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """If the storage account's soft delete feature is disabled then, when a - blob is deleted, it is permanently removed from the storage account. If - the storage account's soft delete feature is enabled, then, when a blob - is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for - the number of days specified by the DeleteRetentionPolicy section of - [Storage service properties] (Set-Blob-Service-Properties.md). After - the specified number of days has passed, the blob's data is permanently - removed from the storage account. Note that you continue to be charged - for the soft-deleted blob's storage until it is permanently removed. 
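``get_properties`` above is the HEAD counterpart of the same URL: there is no body, and everything of interest (ETag, content headers, lease state, access tier, version id, and so on) comes back as response headers. A sketch, same assumptions::

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # hypothetical
    sas = "sv=...&sig=..."

    resp = requests.head(
        f"{blob_url}?{sas}",
        headers={"x-ms-version": "2019-12-12"},
    )
    resp.raise_for_status()                          # 200 on success; HEAD has no body
    for name in ("ETag", "Last-Modified", "Content-Length", "x-ms-blob-type", "x-ms-lease-state"):
        print(name, resp.headers.get(name))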
- Use the List Blobs API and specify the "include=deleted" query - parameter to discover which blobs and snapshots have been soft deleted. - You can then use the Undelete Blob API to restore a soft-deleted blob. - All other operations on a soft-deleted blob or snapshot causes the - service to return an HTTP status code of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param delete_snapshots: Required if the blob has associated - snapshots. Specify one of the following two options: include: Delete - the base blob and all of its snapshots. only: Delete only the blob's - snapshots and not the blob itself. Possible values include: 'include', - 'only' - :type delete_snapshots: str or - ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} 
- if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}/{blob}'} - - def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". 
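``delete`` above issues an HTTP DELETE and expects 202 Accepted; ``x-ms-delete-snapshots`` chooses between deleting the base blob together with its snapshots or only the snapshots. A sketch, same assumptions::

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # hypothetical
    sas = "sv=...&sig=..."

    resp = requests.delete(
        f"{blob_url}?{sas}",
        headers={
            "x-ms-version": "2019-12-12",
            "x-ms-delete-snapshots": "include",      # or "only"; required if the blob has snapshots
        },
    )
    resp.raise_for_status()                          # the service replies 202 Accepted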
- :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
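``set_access_control`` above is a hierarchical-namespace (Data Lake) operation: a PATCH with ``action=setAccessControl`` carrying the owner, group, permissions, or ACL headers, answered with 200. A sketch, assuming a hypothetical DFS endpoint path and SAS token::

    import requests

    path_url = "https://myaccount.dfs.core.windows.net/myfilesystem/dir/file.txt"  # hypothetical
    sas = "sv=...&sig=..."

    resp = requests.patch(
        f"{path_url}?{sas}",
        params={"action": "setAccessControl"},
        headers={
            "x-ms-version": "2019-12-12",
            "x-ms-permissions": "rwxr-x---",         # symbolic or 4-digit octal, e.g. 0750
            "x-ms-owner": "owner-object-id",         # optional; hypothetical AAD object id
        },
    )
    resp.raise_for_status()                          # 200 with ETag / Last-Modified headers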
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': 
self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """Rename a blob/file. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. 
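``get_access_control`` above is the matching HEAD request with ``action=getAccessControl``; the owner, group, permissions, and ACL are returned in the ``x-ms-owner``, ``x-ms-group``, ``x-ms-permissions``, and ``x-ms-acl`` response headers, and ``upn=true`` asks for principal names instead of object IDs. A sketch, same assumptions::

    import requests

    path_url = "https://myaccount.dfs.core.windows.net/myfilesystem/dir/file.txt"  # hypothetical
    sas = "sv=...&sig=..."

    resp = requests.head(
        f"{path_url}?{sas}",
        params={"action": "getAccessControl", "upn": "true"},
        headers={"x-ms-version": "2019-12-12"},
    )
    resp.raise_for_status()                          # 200 on success
    print(resp.headers.get("x-ms-owner"), resp.headers.get("x-ms-acl"))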
If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - def undelete(self, timeout=None, request_id=None, cls=None, **kwargs): - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.undelete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - undelete.metadata = {'url': '/{containerName}/{blob}'} - - def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, cls=None, **kwargs): - """Sets the time a 
blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', - 'RelativeToNow', 'Absolute' - :type expiry_options: str or - ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param expires_on: The time to set the blob to expiry - :type expires_on: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "expiry" - - # Construct URL - url = self.set_expiry.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_expiry.metadata = {'url': '/{containerName}/{blob}'} - - def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
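``set_expiry`` above is a PUT with ``comp=expiry``; the mode goes in ``x-ms-expiry-option`` and, for every mode except ``NeverExpire``, the time in ``x-ms-expiry-time``. A sketch, same hypothetical blob endpoint; the relative-mode value below is assumed to be in milliseconds::

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # hypothetical
    sas = "sv=...&sig=..."

    resp = requests.put(
        f"{blob_url}?{sas}",
        params={"comp": "expiry"},
        headers={
            "x-ms-version": "2019-12-12",
            "x-ms-expiry-option": "RelativeToNow",   # NeverExpire | RelativeToCreation | RelativeToNow | Absolute
            "x-ms-expiry-time": "86400000",          # assumed: milliseconds for the relative modes
        },
    )
    resp.raise_for_status()                          # 200 with ETag / Last-Modified on success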
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", 
blob_content_type, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} - - def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set Blob Metadata operation sets user-defined metadata for the - specified blob as one or more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. 
If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - 
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}/{blob}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. 
A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # 
Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} - - def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}/{blob}'} - - def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", 
if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}/{blob}'} - - def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}/{blob}'} - - def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': 
self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}/{blob}'} - - def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags 
= None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 
'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} - - def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """The Start Copy From URL operation copies a blob or an internet resource - to a new blob. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. - Service version 2019-12-12 and newer. 
- :type seal_blob: bool - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - source_if_tags = None - if source_modified_access_conditions is not None: - source_if_tags = source_modified_access_conditions.source_if_tags - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if 
blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, modified_access_conditions=None, 
lease_access_conditions=None, cls=None, **kwargs): - """The Copy From URL operation copies a blob or an internet resource to a - new blob. It will not return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - 
header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """The Abort Copy From URL operation aborts a pending Copy From URL - operation, and leaves a destination blob with zero length and full - metadata. 
- - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set Tier operation sets the tier on a blob. 
The operation is - allowed on a page blob in a premium storage account and on a block blob - in a blob storage account (locally redundant storage only). A premium - page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. Possible values - include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', - 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tier" - - # Construct URL - url = self.set_tier.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = 
self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tier.metadata = {'url': '/{containerName}/{blob}'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . - - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': 
'/{containerName}/{blob}'} - - def query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Query operation enables users to select/project on blob data by - providing simple query expressions. - - :param query_request: the query request - :type query_request: ~azure.storage.blob.models.QueryRequest - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "query" - - # Construct URL - url = self.query.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 
'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', 
response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', 
response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} - - def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Tags operation enables users to get the tags associated with a - blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. 
- :type version_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlobTags or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tags" - - # Construct URL - url = self.get_tags.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlobTags', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} - - def set_tags(self, timeout=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. 
- :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param tags: Blob tags - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tags" - - # Construct URL - url = self.set_tags.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags') - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', 
response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tags.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_block_blob_operations.py deleted file mode 100644 index 8228c47..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_block_blob_operations.py +++ /dev/null @@ -1,833 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class BlockBlobOperations(object): - """BlockBlobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "BlockBlob" - - def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Upload Block Blob operation updates the content of an existing - block blob. Updating an existing block blob overwrites any existing - metadata on the blob. Partial updates are not supported with Put Blob; - the content of the existing blob is overwritten with the content of the - new blob. To perform a partial update of the content of a block blob, - use the Put Block List operation. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. 
Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = 
modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.upload.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload.metadata = {'url': '/{containerName}/{blob}'} - - def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data - :type body: Generator - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - - comp = "block" - - # Construct URL - url = self.stage_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = 
self._serialize.header("encryption_scope", encryption_scope, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block.metadata = {'url': '/{containerName}/{blob}'} - - def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob where the contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "block" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. 
In order to be written as part of a - blob, a block must have been successfully written to the server in a - prior Put Block operation. You can call Put Block List to update a blob - by uploading only those blocks that have changed, then committing the - new and existing blocks together. You can do this by specifying whether - to commit a block from the committed block list or from the uncommitted - block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "blocklist" - - # Construct URL - url = self.commit_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; 
charset=utf-8' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - body_content = self._serialize.body(blocks, 'BlockLookupList') - - # Construct and send request - request = 
self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} - - def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param list_type: Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
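Taken together, the stage/commit/list operations in this file implement the usual block-blob upload flow. A minimal sketch of that flow through the vendored client follows; the import path, connection string and data are assumptions for illustration:

.. code-block:: python

    # Sketch only: stage blocks, commit them, then read back the block list.
    from base64 import b64encode

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobBlock, BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="big.bin"
    )

    block_ids = []
    for index, chunk in enumerate([b"first chunk", b"second chunk"]):
        # Block IDs must be Base64 and the same length for every block in a blob.
        block_id = b64encode(f"block-{index:06d}".encode()).decode()
        blob.stage_block(block_id=block_id, data=chunk)      # Put Block
        block_ids.append(BlobBlock(block_id=block_id))

    blob.commit_block_list(block_ids)                        # Put Block List
    committed, uncommitted = blob.get_block_list("all")      # Get Block List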
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlockList or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "blocklist" - - # Construct URL - url = self.get_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlockList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, 
deserialized, header_dict) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_container_operations.py deleted file mode 100644 index 5730483..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_container_operations.py +++ /dev/null @@ -1,1400 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ContainerOperations(object): - """ContainerOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, cls=None, **kwargs): - """creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
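For context, the request that this create() method assembles below reduces to a single authenticated PUT against the container URL with ``restype=container``. A rough sketch with the standalone ``requests`` library, using a placeholder account URL and SAS token (both assumptions), looks like this:

.. code-block:: python

    # Rough sketch of the Create Container call the generated code below builds.
    import requests

    account_url = "https://<account>.blob.core.windows.net"
    sas = "<sas-token-with-container-create-permission>"   # placeholder

    response = requests.put(
        f"{account_url}/demo-container?{sas}",
        params={"restype": "container"},           # same query flag set below
        headers={
            "x-ms-version": "2019-12-12",          # service version this deleted module targeted
            "x-ms-blob-public-access": "blob",     # optional: anonymous read for blobs
        },
    )
    response.raise_for_status()                    # expect 201 Created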
- :type request_id: str - :param container_cpk_scope_info: Additional parameters for the - operation - :type container_cpk_scope_info: - ~azure.storage.blob.models.ContainerCpkScopeInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - default_encryption_scope = None - if container_cpk_scope_info is not None: - default_encryption_scope = container_cpk_scope_info.default_encryption_scope - prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - - restype = "container" - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str') - if prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}'} - - def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """returns all user-defined metadata and system properties for the - 
specified container. The data returned does not include the container's - list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')), - 'x-ms-has-legal-hold': self._deserialize('bool', 
response.headers.get('x-ms-has-legal-hold')), - 'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')), - 'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}'} - - def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """operation marks the specified container for deletion. The container and - any blobs contained within it are later deleted during garbage - collection. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - 
response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}'} - - def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """operation sets one or more user-defined name-value pairs for the - specified container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - - restype = "container" - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}'} - - def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """gets the permissions for the specified container. The permissions - indicate whether container data may be accessed publicly. 
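This pair of access-policy operations (get_access_policy here and set_access_policy further below) is normally reached through the vendored container client's ACL helpers. A minimal sketch, assuming the package layout of one of the retained versions and placeholder names throughout:

.. code-block:: python

    # Sketch only: grant read+list access for one hour via a stored access policy.
    from datetime import datetime, timedelta, timezone

    from azure.multiapi.storagev2.blob.v2021_08_06 import AccessPolicy, ContainerClient

    container = ContainerClient.from_connection_string(
        "<connection-string>", "demo-container"
    )

    policy = AccessPolicy(
        permission="rl",                                      # read + list
        start=datetime.now(timezone.utc),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    )
    container.set_container_access_policy(                    # Set Container ACL
        signed_identifiers={"read-only-hour": policy},
        public_access=None,
    )
    acl = container.get_container_access_policy()             # Get Container ACL
    print(acl["public_access"], acl["signed_identifiers"])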
- - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} - - def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, 
modified_access_conditions=None, cls=None, **kwargs): - """sets the permissions for the specified container. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param container_acl: the acls for the container - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}} - if 
container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{containerName}'} - - def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, cls=None, **kwargs): - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param deleted_container_name: Optional. Version 2019-12-12 and - later. Specifies the name of the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and - later. Specifies the version of the deleted container to restore. 
- :type deleted_container_version: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "undelete" - - # Construct URL - url = self.restore.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - restore.metadata = {'url': '/{containerName}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
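acquire_lease and the release/renew operations that follow form a small lease lifecycle for the container. A minimal sketch through the vendored client, with placeholder names and an assumed import path:

.. code-block:: python

    # Sketch only: take a short container lease, extend it once, then free it.
    from azure.multiapi.storagev2.blob.v2021_08_06 import ContainerClient

    container = ContainerClient.from_connection_string(
        "<connection-string>", "demo-container"
    )

    lease = container.acquire_lease(lease_duration=15)   # 15-60 seconds, or -1 for infinite
    lease.renew()                                         # Renew Lease: extends the same lease
    lease.release()                                       # Release Lease: container is unlocked again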
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}'} - - def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - 
response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}'} - - def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 
'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}'} - - def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}'} - - def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, 
cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}'} - - def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsFlatSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} - - def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. 
- - :param delimiter: When the request includes this parameter, the - operation returns a BlobPrefix element in the response body that acts - as a placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character. The - delimiter may be a single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsHierarchySegmentResponse or the result of - cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . 
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/{containerName}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_directory_operations.py deleted file mode 100644 index c2bf317..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,739 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar resource: . Constant value: "directory". 
- """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "directory" - - def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Create a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - - # Construct headers - header_parameters = {} - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: 
- header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """Rename a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. 
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct 
headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 
'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the - directory will be deleted. If "false" and the directory is non-empty, - an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_page_blob_operations.py deleted file mode 100644 index fedc96c..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_page_blob_operations.py +++ /dev/null @@ -1,1399 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class PageBlobOperations(object): - """PageBlobOperations operations. 
- - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "PageBlob" - - def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80' - :type tier: str or - ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
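# Illustrative sketch only (not part of this diff): this generated Create operation is
# what BlobClient.create_page_blob drives in the upstream azure-storage-blob v12 API
# (assumed here); the connection string, container and blob names are placeholders.
# The size must be 512-byte aligned, matching the blob_content_length constraint above.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "mypageblob")
blob.create_page_blob(size=4 * 512, metadata={"purpose": "demo"})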
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] 
= self._serialize.header("tier", tier, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
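# Illustrative sketch only, continuing the BlobClient example above (assumed upstream
# v12 API): Upload Pages corresponds to BlobClient.upload_page; the offset and length
# must be 512-aligned and the body must be exactly `length` bytes.
page = b"\x00" * 512
blob.upload_page(page, offset=0, length=512)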
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if 
transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': 
self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages.metadata = {'url': '/{containerName}/{blob}'} - - def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
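# Illustrative sketch only, continuing the BlobClient example above (assumed upstream
# v12 API): Clear Pages zeroes an aligned range without sending a request body.
blob.clear_page(offset=0, length=512)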
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "page" - page_write = "clear" - - # Construct URL - url = self.clear_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = 
self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - clear_pages.metadata = {'url': '/{containerName}/{blob}'} - - def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The - length of this range should match the ContentLength header and - x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be - written. The range should be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
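# Illustrative sketch only, continuing the BlobClient example above (assumed upstream
# v12 API): the source blob URL must be readable by the service (for example via a SAS
# token) and both ranges must be 512-aligned with equal lengths; the URL is a placeholder.
source_url = "https://account.blob.core.windows.net/src/source-page-blob?<sas>"
blob.upload_pages_from_url(source_url, offset=0, length=512, source_offset=0)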
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is 
not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if 
if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Page Ranges operation returns the list of valid page ranges for - a page blob or snapshot of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. 
- :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} - - def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Page Ranges Diff operation returns the list of valid page - ranges for a page blob that were changed between target blob and - previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The - prevsnapshot parameter is a DateTime value that specifies that the - response will contain only pages that were changed between target blob - and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot - specified by prevsnapshot is the older of the two. Note that - incremental snapshots are currently supported only for blobs created - on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in - service versions 2019-04-19 and after and specifies the URL of a - previous snapshot of the target blob. The response will only contain - pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
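# Illustrative sketch only, continuing the BlobClient example above (assumed upstream
# v12 API): get_page_ranges returns (valid_ranges, cleared_ranges), and passing
# previous_snapshot_diff yields the changed-pages view that Get Page Ranges Diff exposes.
snapshot = blob.create_snapshot()
valid_ranges, cleared_ranges = blob.get_page_ranges()
changed, cleared_since = blob.get_page_ranges(previous_snapshot_diff=snapshot)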
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and 
send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} - - def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
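# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# The pagelist "diff" variant above is surfaced on the public BlobClient through the
# previous_snapshot_diff argument of get_page_ranges: only pages changed (written or
# cleared) since the given snapshot are returned. Names below are placeholders.
from azure.storage.blob import BlobClient

page_blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "mypageblob")
baseline = page_blob.create_snapshot()          # snapshot to diff against
# ... further writes to the page blob happen here ...
changed, cleared = page_blob.get_page_ranges(previous_snapshot_diff=baseline)
print(len(changed), "changed ranges,", len(cleared), "cleared ranges")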
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.resize.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') 
- if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - resize.metadata = {'url': '/{containerName}/{blob}'} - - def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the - x-ms-blob-sequence-number header is set for the request. This property - applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. Possible values include: - 'max', 'update', 'increment' - :type sequence_number_action: str or - ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
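# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# The resize operation above corresponds to BlobClient.resize_blob in the public client.
# The new size is given in bytes and must be a multiple of 512. Names are placeholders.
from azure.storage.blob import BlobClient

page_blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "mypageblob")
page_blob.resize_blob(4 * 1024 * 1024)  # grow (or shrink) the page blob to 4 MiB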
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.update_sequence_number.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} - - def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """The Copy Incremental operation copies a snapshot of the source page - blob to a destination page blob. The snapshot is copied such that only - the differential changes between the previously copied snapshot are - transferred to the destination. The copied snapshots are complete - copies of the original snapshot and can be read or copied from as - usual. This API is supported since REST version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
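# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# update_sequence_number above maps to BlobClient.set_sequence_number in the public
# client. The action is one of "max", "update" or "increment"; a numeric value is only
# supplied for the first two. Names below are placeholders.
from azure.storage.blob import BlobClient

page_blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "mypageblob")
page_blob.set_sequence_number("update", 42)   # set the sequence number explicitly
page_blob.set_sequence_number("increment")    # or bump it by one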
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "incrementalcopy" - - # Construct URL - url = self.copy_incremental.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 
'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_service_operations.py deleted file mode 100644 index 0a49915..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/operations/_service_operations.py +++ /dev/null @@ -1,663 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - def set_properties(self, storage_service_properties, timeout=None, request_id=None, cls=None, **kwargs): - """Sets properties for a storage account's Blob service endpoint, - including properties for Storage Analytics and CORS (Cross-Origin - Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
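# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# copy_incremental above is reached through BlobClient.start_copy_from_url with
# incremental_copy=True; the source must be a page-blob snapshot URL the service can
# read (public or carrying a SAS). The URL and names below are placeholders.
from azure.storage.blob import BlobClient

destination = BlobClient.from_connection_string("<connection-string>", "backups", "disk-incremental")
source_snapshot_url = "https://<src-account>.blob.core.windows.net/disks/disk.vhd?snapshot=<ts>&<sas>"
copy = destination.start_copy_from_url(source_snapshot_url, incremental_copy=True)
print(copy["copy_id"], copy["copy_status"])   # the copy proceeds asynchronously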
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): - """gets the properties of a storage account's Blob service, including - properties for Storage Analytics and CORS (Cross-Origin Resource - Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
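# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# The service-level set_properties operation above is exposed as
# BlobServiceClient.set_service_properties; analytics logging, metrics, CORS and
# retention settings are passed as the model types named in the docstring.
# The connection string and retention values below are placeholders.
from azure.storage.blob import BlobServiceClient, BlobAnalyticsLogging, RetentionPolicy

service = BlobServiceClient.from_connection_string("<connection-string>")
service.set_service_properties(
    analytics_logging=BlobAnalyticsLogging(
        read=True, write=True, delete=True,
        retention_policy=RetentionPolicy(enabled=True, days=7)),
    delete_retention_policy=RetentionPolicy(enabled=True, days=14))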
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs): - """Retrieves statistics related to replication for the Blob service. It is - only available on the secondary location endpoint when read-access - geo-redundant replication is enabled for the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceStats or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceStats', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/'} - - def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """The List Containers Segment operation returns a list of the containers - under the specified account. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. 
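# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# get_properties and get_statistics above surface as get_service_properties and
# get_service_stats on BlobServiceClient. Statistics are only available when
# read-access geo-redundant replication (RA-GRS) is enabled, via the secondary
# endpoint. The return shapes sketched here are approximate; names are placeholders.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")
props = service.get_service_properties()              # mapping of the model types above
print(props["delete_retention_policy"].enabled)
stats = service.get_service_stats()
print(stats["geo_replication"]["status"])             # e.g. "live"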
Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's - metadata be returned as part of the response body. - :type include: list[str or - ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListContainersSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_containers_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListContainersSegmentResponse', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return 
cls(response, deserialized, header_dict) - - return deserialized - list_containers_segment.metadata = {'url': '/'} - - def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=None, **kwargs): - """Retrieves a user delegation key for the Blob service. This is only a - valid operation when using bearer token authentication. - - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: UserDelegationKey or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "userdelegationkey" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(key_info, 'KeyInfo') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UserDelegationKey', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . 
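# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# list_containers_segment above backs BlobServiceClient.list_containers (continuation
# markers are handled by the paging iterator), and get_user_delegation_key is exposed
# under the same name but requires an Azure AD token credential rather than a shared
# key. The credential and names below are placeholders.
from datetime import datetime, timedelta
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient("https://<account>.blob.core.windows.net", credential="<token-credential>")
for container in service.list_containers(name_starts_with="logs", include_metadata=True):
    print(container.name, container.metadata)

delegation_key = service.get_user_delegation_key(
    key_start_time=datetime.utcnow(),
    key_expiry_time=datetime.utcnow() + timedelta(hours=1))  # used to sign user delegation SAS tokens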
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/'} - - def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, cls=None, **kwargs): - """The Batch operation allows multiple API calls to be embedded into a - single HTTP request. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must - be multipart/mixed with a batch boundary. Example header value: - multipart/mixed; boundary=batch_ - :type multipart_content_type: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
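# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# get_account_info above is exposed as get_account_information on the public clients;
# the SKU name and account kind come back in the response headers shown above.
# The connection string is a placeholder.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")
info = service.get_account_information()
print(info["sku_name"], info["account_kind"])   # e.g. "Standard_LRS", "StorageV2"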
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "batch" - - # Construct URL - url = self.submit_batch.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - submit_batch.metadata = {'url': '/'} - - def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, cls=None, **kwargs): - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param where: Filters the results to return only to return only blobs - whose tags match the specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. 
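# --- Illustrative usage sketch (editorial annotation, not part of the vendored sources) ---
# Callers rarely invoke submit_batch directly; the public SDK wraps it in helpers such
# as ContainerClient.delete_blobs, which packs the individual deletes into a single
# multipart/mixed batch request. Container and blob names below are placeholders.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")
container = service.get_container_client("mycontainer")
container.delete_blobs("old-1.log", "old-2.log", "old-3.log")  # one batched HTTP request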
The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: FilterBlobSegment or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "blobs" - - # Construct URL - url = self.filter_blobs.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FilterBlobSegment', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - filter_blobs.metadata = {'url': '/'} diff --git 
a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/version.py b/azure/multiapi/storagev2/blob/v2019_12_12/_generated/version.py deleted file mode 100644 index be04589..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/version.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -VERSION = "2019-12-12" - diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_lease.py b/azure/multiapi/storagev2/blob/v2019_12_12/_lease.py deleted file mode 100644 index 7d38423..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_lease.py +++ /dev/null @@ -1,341 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated.models import StorageErrorException, LeaseAccessConditions -from ._serialize import get_modify_conditions - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -class BlobLeaseClient(object): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.BlobClient or - ~azure.storage.blob.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. 
- """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'blob_name'): - self._client = client._client.blob # type: ignore # pylint: disable=protected-access - elif hasattr(client, 'container_name'): - self._client = client._client.container # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use either BlobClient or ContainerClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. 
When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. 
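For orientation, a minimal sketch of the lease lifecycle described above (acquire, use, renew, release), assuming the public azure.storage.blob v12 package that this vendored copy mirrors; the connection string, container and blob names are placeholders::

    from azure.storage.blob import BlobClient, BlobLeaseClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="blob.txt")

    # Acquire a 30-second lease; the client keeps the lease id in `lease.id`.
    lease = BlobLeaseClient(blob)
    lease.acquire(lease_duration=30)

    # Write operations on the blob must now present the lease.
    blob.set_blob_metadata({"reviewed": "true"}, lease=lease)

    # Renew before expiry, or release when done.  break_lease() would end the
    # lease without needing a matching id.
    lease.renew()
    lease.release()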
- - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2019_12_12/_list_blobs_helper.py deleted file mode 100644 index f1dd70f..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_list_blobs_helper.py +++ /dev/null @@ -1,166 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.paging import PageIterator, ItemPaged -from ._deserialize import get_blob_properties_from_generated_code -from ._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix -from ._models import BlobProperties -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. 
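As an illustration of how pagers like BlobPropertiesPaged are normally consumed, a short sketch using the public azure.storage.blob v12 API (connection string and names are placeholders); ``list_blobs()`` drives the flat pager and ``walk_blobs()`` the prefix-aware variant::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string(
        "<connection-string>", container_name="mycontainer")

    # Flat listing: yields BlobProperties, following continuation tokens for you.
    for blob in container.list_blobs(name_starts_with="logs/"):
        print(blob.name, blob.size)

    # Hierarchical listing: groups names on the delimiter and yields BlobPrefix
    # objects for the "virtual directories".
    for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
        print(item.name)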
- :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - def _extract_data_cb(self, get_next_return): - continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def 
_build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class BlobPrefix(ItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str next_marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_models.py b/azure/multiapi/storagev2/blob/v2019_12_12/_models.py deleted file mode 100644 index a97445d..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_models.py +++ /dev/null @@ -1,1143 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from ._generated.models import FilterBlobItem - -from ._shared import decode_base64_to_text -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Logging as GeneratedLogging -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import StaticWebsite as GeneratedStaticWebsite -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import StorageErrorException - - -class BlobType(str, Enum): - - BlockBlob = "BlockBlob" - PageBlob = "PageBlob" - AppendBlob = "AppendBlob" - - -class BlockState(str, Enum): - """Block blob block types.""" - - Committed = 'Committed' #: Committed blocks. - Latest = 'Latest' #: Latest blocks. - Uncommitted = 'Uncommitted' #: Uncommitted blocks. - - -class StandardBlobTier(str, Enum): - """ - Specifies the blob tier to set the blob to. This is only applicable for - block blobs on standard storage accounts. - """ - - Archive = 'Archive' #: Archive - Cool = 'Cool' #: Cool - Hot = 'Hot' #: Hot - - -class PremiumPageBlobTier(str, Enum): - """ - Specifies the page blob tier to set the blob to. This is only applicable to page - blobs on premium storage accounts. Please take a look at: - https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughput per PageBlobTier. - """ - - P4 = 'P4' #: P4 Tier - P6 = 'P6' #: P6 Tier - P10 = 'P10' #: P10 Tier - P20 = 'P20' #: P20 Tier - P30 = 'P30' #: P30 Tier - P40 = 'P40' #: P40 Tier - P50 = 'P50' #: P50 Tier - P60 = 'P60' #: P60 Tier - - -class SequenceNumberAction(str, Enum): - """Sequence number actions.""" - - Increment = 'increment' - """ - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - """ - - Max = 'max' - """ - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - """ - - Update = 'update' - """Sets the sequence number to the value included with the request.""" - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the container may be accessed publicly and the level of access. - """ - - OFF = 'off' - """ - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - """ - - Blob = 'blob' - """ - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - """ - - Container = 'container' - """ - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. 
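For context, a small sketch of where the PublicAccess and StandardBlobTier enums above usually appear, assuming the public azure.storage.blob v12 API; the account URL, credential and names are placeholders::

    from azure.storage.blob import BlobServiceClient, PublicAccess, StandardBlobTier

    service = BlobServiceClient("https://<account>.blob.core.windows.net",
                                credential="<account-key>")

    # Create a container whose blobs are publicly readable (container data is not).
    container = service.create_container("public-assets",
                                         public_access=PublicAccess.Blob)

    # Move an existing block blob to the cool access tier.
    blob = container.get_blob_client("archive-me.txt")
    blob.set_standard_blob_tier(StandardBlobTier.Cool)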
- """ - - -class BlobAnalyticsLogging(GeneratedLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Blob service. - The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. 
- """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GeneratedStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. - """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. 
- """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ContainerProperties(DictMixin): - """Blob container's properties class. - - Returned ``ContainerProperties`` instances expose these values through a - dictionary interface, for example: ``container_props["last_modified"]``. - Additionally, the container name is available as ``container_props["name"]``. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the container. - :ivar str public_access: Specifies whether data in the container may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the container has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the container has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - container as metadata. - :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope: - The default encryption scope configuration for the container. - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.lease = LeaseProperties(**kwargs) - self.public_access = kwargs.get('x-ms-blob-public-access') - self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') - self.deleted = None - self.version = None - self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') - self.metadata = kwargs.get('metadata') - self.encryption_scope = None - default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') - if default_encryption_scope: - self.encryption_scope = ContainerEncryptionScope( - default_encryption_scope=default_encryption_scope, - prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) - ) - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = generated.properties.public_access - props.has_immutability_policy = generated.properties.has_immutability_policy - props.deleted = generated.deleted - props.version = generated.version - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access - return props - - -class ContainerPropertiesPaged(PageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. 
- :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class BlobProperties(DictMixin): - """ - Blob Properties. - - :ivar str name: - The name of the blob. - :ivar str container: - The container in which the blob resides. - :ivar str snapshot: - Datetime value that uniquely identifies the blob snapshot. - :ivar ~azure.blob.storage.BlobType blob_type: - String indicating this blob's type. - :ivar dict metadata: - Name-value pairs associated with the blob as metadata. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - The size of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar bool is_append_blob_sealed: - Indicate if the append blob is sealed or not. - - .. 
versionadded:: 12.4.0 - - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar ~azure.storage.blob.StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. - :ivar str rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :ivar ~datetime.datetime blob_tier_change_time: - Indicates when the access tier was last changed. - :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar bool deleted: - Whether this blob was deleted. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - :ivar ~datetime.datetime creation_time: - Indicates when the blob was created, in UTC. - :ivar str archive_status: - Archive status of blob. - :ivar str encryption_key_sha256: - The SHA-256 hash of the provided encryption key. - :ivar str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :ivar bool request_server_encrypted: - Whether this blob is encrypted. - :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: - Only present for blobs that have policy ids and rule ids applied to them. - - .. versionadded:: 12.4.0 - - :ivar str object_replication_destination_policy: - Represents the Object Replication Policy Id that created this blob. - - .. versionadded:: 12.4.0 - - :ivar int tag_count: - Tags count on this blob. - - .. versionadded:: 12.4.0 - - :ivar dict(str, str) tags: - Key value pair of tags on this blob. - - .. 
versionadded:: 12.4.0 - - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.container = None - self.snapshot = kwargs.get('x-ms-snapshot') - self.version_id = kwargs.get('x-ms-version-id') - self.is_current_version = kwargs.get('x-ms-is-current-version') - self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None - self.metadata = kwargs.get('metadata') - self.encrypted_metadata = kwargs.get('encrypted_metadata') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') - self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') - self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.blob_tier = kwargs.get('x-ms-access-tier') - self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') - self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') - self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') - self.deleted = False - self.deleted_time = None - self.remaining_retention_days = None - self.creation_time = kwargs.get('x-ms-creation-time') - self.archive_status = kwargs.get('x-ms-archive-status') - self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') - self.encryption_scope = kwargs.get('x-ms-encryption-scope') - self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') - self.object_replication_source_properties = kwargs.get('object_replication_source_properties') - self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') - self.tag_count = kwargs.get('x-ms-tag-count') - self.tags = None - - -class FilteredBlob(DictMixin): - """Blob info from a Filter Blobs API call. - - :ivar name: Blob name - :type name: str - :ivar container_name: Container name. - :type container_name: str - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name', None) - self.container_name = kwargs.get('container_name', None) - - -class FilteredBlobPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.FilteredBlob) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. 
- Options include 'primary' or 'secondary'. - """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - blob = FilteredBlob(name=item.name, container_name=item.container_name) # pylint: disable=protected-access - return blob - return item - - -class LeaseProperties(DictMixin): - """Blob Lease Properties. - - :ivar str status: - The lease status of the blob. Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """The content settings of a blob. - - :param str content_type: - The content type specified for the blob. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :param str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :param str content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. 
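As a usage note, ContentSettings is typically supplied when the blob is written; a minimal sketch with the public v12 upload API, with placeholder names::

    from azure.storage.blob import BlobClient, ContentSettings

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="web", blob_name="site.css")

    settings = ContentSettings(content_type="text/css",
                               cache_control="public, max-age=86400")
    with open("site.css", "rb") as data:
        blob.upload_blob(data, overwrite=True, content_settings=settings)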
- """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class CopyProperties(DictMixin): - """Blob Copy Properties. - - These properties will be `None` if this blob has never been the destination - in a Copy Blob operation, or if this blob has been modified after a concluded - Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar ~datetime.datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar ~datetime.datetime destination_snapshot: - Included if the blob is incremental copy blob or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this blob. 
- """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class BlobBlock(DictMixin): - """BlockBlob Block class. - - :param str block_id: - Block id. - :param str state: - Block state. Possible values: committed|uncommitted - :ivar int size: - Block size in bytes. - """ - - def __init__(self, block_id, state=BlockState.Latest): - self.id = block_id - self.state = state - self.size = None - - @classmethod - def _from_generated(cls, generated): - block = cls(decode_base64_to_text(generated.name)) - block.size = generated.size - return block - - -class PageRange(DictMixin): - """Page Range for page blob. - - :param int start: - Start of page range in bytes. - :param int end: - End of page range in bytes. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class ContainerSasPermissions(object): - """ContainerSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_container_sas` function and - for the AccessPolicies used with - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - List blobs in the container. - :param bool tag: - Set or get tags on the blobs in the container. - """ - def __init__(self, read=False, write=False, delete=False, list=False, delete_previous_version=False, tag=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self.delete_previous_version = delete_previous_version - self.tag = tag - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('t' if self.tag else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ContainerSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, delete, - and list permissions. 
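To illustrate how these permission objects feed into SAS generation, a short sketch assuming the public azure.storage.blob v12 helpers; the account name and key are placeholders::

    from datetime import datetime, timedelta
    from azure.storage.blob import ContainerSasPermissions, generate_container_sas

    # "rl" grants read + list; equivalent to
    # ContainerSasPermissions(read=True, list=True).
    permission = ContainerSasPermissions.from_string("rl")

    sas_token = generate_container_sas(
        account_name="<account>",
        container_name="mycontainer",
        account_key="<account-key>",
        permission=permission,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )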
- :return: A ContainerSasPermissions object - :rtype: ~azure.storage.blob.ContainerSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, - delete_previous_version=p_delete_previous_version, tag=p_tag) - parsed._str = permission # pylint: disable = protected-access - return parsed - - -class BlobSasPermissions(object): - """BlobSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_blob_sas` function. - - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool tag: - Set or get tags on the blob. - """ - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, delete_previous_version=False, tag=True): - self.read = read - self.add = add - self.create = create - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.tag = tag - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('t' if self.tag else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a BlobSasPermissions from a string. - - To specify read, add, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. - :return: A BlobSasPermissions object - :rtype: ~azure.storage.blob.BlobSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - - parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, - delete_previous_version=p_delete_previous_version, tag=p_tag) - parsed._str = permission # pylint: disable = protected-access - return parsed - - -class CustomerProvidedEncryptionKey(object): - """ - All data in Azure Storage is encrypted at-rest using an account-level encryption key. - In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents - and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. - - When you use a customer-provided key, Azure Storage does not manage or persist your key. - When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. 
- A SHA-256 hash of the encryption key is written alongside the blob contents, - and is used to verify that all subsequent operations against the blob use the same encryption key. - This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. - When reading a blob, the provided key is used to decrypt your data after reading it from disk. - In both cases, the provided encryption key is securely discarded - as soon as the encryption or decryption process completes. - - :param str key_value: - Base64-encoded AES-256 encryption key value. - :param str key_hash: - Base64-encoded SHA256 of the encryption key. - :ivar str algorithm: - Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - """ - def __init__(self, key_value, key_hash): - self.key_value = key_value - self.key_hash = key_hash - self.algorithm = 'AES256' - - -class ContainerEncryptionScope(object): - """The default encryption scope configuration for a container. - - This scope is used implicitly for all future writes within the container, - but can be overridden per blob operation. - - .. versionadded:: 12.2.0 - - :param str default_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - :param bool prevent_encryption_scope_override: - If true, prevents any request from specifying a different encryption scope than the scope - set on the container. Default value is false. - """ - - def __init__(self, default_encryption_scope, **kwargs): - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) - - @classmethod - def _from_generated(cls, generated): - if generated.properties.default_encryption_scope: - scope = cls( - generated.properties.default_encryption_scope, - prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False - ) - return scope - return None - - -class DelimitedJsonDialect(object): - """Defines the input or output JSON serialization for a blob data query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', '\n') - - -class DelimitedTextDialect(object): - """Defines the input or output delimited (CSV) serialization for a blob query request. - - :keyword str delimiter: - Column separator, defaults to ','. - :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', ',') - self.quotechar = kwargs.pop('quotechar', '"') - self.lineterminator = kwargs.pop('lineterminator', '\n') - self.escapechar = kwargs.pop('escapechar', "") - self.has_header = kwargs.pop('has_header', False) - - -class ObjectReplicationPolicy(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str policy_id: - Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. 
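Picking up the CustomerProvidedEncryptionKey described above, a sketch of supplying a customer-provided key on write and read, assuming the public v12 clients; the key here is generated locally purely for illustration::

    import base64, hashlib, os
    from azure.storage.blob import BlobClient, CustomerProvidedEncryptionKey

    key_bytes = os.urandom(32)  # locally generated AES-256 key
    cpk = CustomerProvidedEncryptionKey(
        key_value=base64.b64encode(key_bytes).decode(),
        key_hash=base64.b64encode(hashlib.sha256(key_bytes).digest()).decode())

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="secure", blob_name="doc.bin")
    blob.upload_blob(b"payload", overwrite=True, cpk=cpk)

    # Every later read of this blob must present the same key.
    data = blob.download_blob(cpk=cpk).readall()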
- :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: - Within each policy there may be multiple replication rules. - e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 - """ - - def __init__(self, **kwargs): - self.policy_id = kwargs.pop('policy_id', None) - self.rules = kwargs.pop('rules', None) - - -class ObjectReplicationRule(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str rule_id: - Rule id. - :ivar str status: - The status of the rule. It could be "Complete" or "Failed" - """ - - def __init__(self, **kwargs): - self.rule_id = kwargs.pop('rule_id', None) - self.status = kwargs.pop('status', None) - - -class BlobQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2019_12_12/_quick_query_helper.py deleted file mode 100644 index eb51d98..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_quick_query_helper.py +++ /dev/null @@ -1,196 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from io import BytesIO -from typing import Union, Iterable, IO # pylint: disable=unused-import - -from ._shared.avro.datafile import DataFileReader -from ._shared.avro.avro_io import DatumReader - - -class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. 
- """ - - def __init__( - self, - name=None, - container=None, - errors=None, - record_delimiter='\n', - encoding=None, - headers=None, - response=None, - error_cls=None, - ): - self.name = name - self.container = container - self.response_headers = headers - self.record_delimiter = record_delimiter - self._size = 0 - self._bytes_processed = 0 - self._errors = errors - self._encoding = encoding - self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) - self._first_result = self._process_record(next(self._parsed_results)) - self._error_cls = error_cls - - def __len__(self): - return self._size - - def _process_record(self, result): - self._size = result.get('totalBytes', self._size) - self._bytes_processed = result.get('bytesScanned', self._bytes_processed) - if 'data' in result: - return result.get('data') - if 'fatal' in result: - error = self._error_cls( - error=result['name'], - is_fatal=result['fatal'], - description=result['description'], - position=result['position'] - ) - if self._errors: - self._errors(error) - return None - - def _iter_stream(self): - if self._first_result is not None: - yield self._first_result - for next_result in self._parsed_results: - processed_result = self._process_record(next_result) - if processed_result is not None: - yield processed_result - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - for record in self._iter_stream(): - stream.write(record) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - delimiter = self.record_delimiter.encode('utf-8') - for record_chunk in self._iter_stream(): - for record in record_chunk.split(delimiter): - if self._encoding: - yield record.decode(self._encoding) - else: - yield record - - - -class QuickQueryStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator): - self.generator = generator - self.iterator = iter(generator) - self._buf = b"" - self._point = 0 - self._download_offset = 0 - self._buf_start = 0 - self.file_length = None - - def __len__(self): - return self.file_length - - def __iter__(self): - return self.iterator - - @staticmethod - def seekable(): - return True - - def __next__(self): - next_part = next(self.iterator) - self._download_offset += len(next_part) - return next_part - - next = __next__ # Python 2 compatibility. - - def tell(self): - return self._point - - def seek(self, offset, whence=0): - if whence == 0: - self._point = offset - elif whence == 1: - self._point += offset - else: - raise ValueError("whence must be 0, or 1") - if self._point < 0: - self._point = 0 # XXX is this right? 
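The streamer above adapts a chunked download generator into a seekable, file-like object that ``DataFileReader`` can consume. Below is a minimal sketch of that buffering pattern, assuming plain in-memory byte chunks; the class and attribute names are invented for the illustration, and it omits the buffer trimming the real streamer performs::

    class ChunkStreamer:
        """Illustrative file-like reader over an iterator of byte chunks."""

        def __init__(self, chunks):
            self._iter = iter(chunks)
            self._buf = b""      # every byte pulled so far (never trimmed in this sketch)
            self._pos = 0        # logical read position
            self._fetched = 0    # total bytes pulled from the iterator

        def tell(self):
            return self._pos

        def read(self, size):
            # Pull chunks until the buffer can satisfy the request or the input ends.
            while self._pos + size > self._fetched:
                try:
                    chunk = next(self._iter)
                except StopIteration:
                    break
                self._buf += chunk
                self._fetched += len(chunk)
            start = self._pos
            self._pos = min(self._pos + size, self._fetched)
            return self._buf[start:self._pos]

    # A generator of network chunks would normally feed this; byte literals stand in here.
    stream = ChunkStreamer([b"abc", b"defgh", b"ij"])
    assert stream.read(4) == b"abcd" and stream.tell() == 4
    assert stream.read(100) == b"efghij"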
- - def read(self, size): - try: - # keep reading from the generator until the buffer of this stream has enough data to read - while self._point + size > self._download_offset: - self._buf += self.__next__() - except StopIteration: - self.file_length = self._download_offset - - start_point = self._point - - # EOF - self._point = min(self._point + size, self._download_offset) - - relative_start = start_point - self._buf_start - if relative_start < 0: - raise ValueError("Buffer has dumped too much data") - relative_end = relative_start + size - data = self._buf[relative_start: relative_end] - - # dump the extra data in buffer - # buffer start--------------------16bytes----current read position - dumped_size = max(relative_end - 16 - relative_start, 0) - self._buf_start += dumped_size - self._buf = self._buf[dumped_size:] - - return data diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_serialize.py b/azure/multiapi/storagev2/blob/v2019_12_12/_serialize.py deleted file mode 100644 index 6781096..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_serialize.py +++ /dev/null @@ -1,179 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -try: - from urllib.parse import quote -except ImportError: - from urllib2 import quote # type: ignore - -from azure.core import MatchConditions - -from ._models import ( - ContainerEncryptionScope, - DelimitedJsonDialect -) -from ._generated.models import ( - ModifiedAccessConditions, - SourceModifiedAccessConditions, - CpkScopeInfo, - ContainerCpkScopeInfo, - QueryFormat, - QuerySerialization, - DelimitedTextConfiguration, - JsonTextConfiguration, - QueryFormatType, - BlobTag, - BlobTags -) - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if kwargs.get(etag_param): - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_modify_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None), - 
if_tags=kwargs.pop('if_tags_match_condition', None) - ) - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), - source_if_tags=kwargs.pop('source_if_tags_match_condition', None) - ) - - -def get_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> CpkScopeInfo - if 'encryption_scope' in kwargs: - return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) - return None - - -def get_container_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> ContainerCpkScopeInfo - encryption_scope = kwargs.pop('container_encryption_scope', None) - if encryption_scope: - if isinstance(encryption_scope, ContainerEncryptionScope): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope.default_encryption_scope, - prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override - ) - if isinstance(encryption_scope, dict): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope['default_encryption_scope'], - prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') - ) - raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") - return None - - -def get_api_version(kwargs, default): - # type: (Dict[str, Any]) -> str - api_version = kwargs.pop('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) - return api_version or default - - -def serialize_blob_tags_header(tags=None): - # type: (Optional[Dict[str, str]]) -> str - if tags is None: - return None - - components = list() - if tags: - for key, value in tags.items(): - components.append(quote(key, safe='.-')) - components.append('=') - components.append(quote(value, safe='.-')) - components.append('&') - - if components: - del components[-1] - - return ''.join(components) - - -def serialize_blob_tags(tags=None): - # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] - tag_list = list() - if tags: - tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] - return BlobTags(blob_tag_set=tag_list) - - -def serialize_query_format(formater): - if isinstance(formater, DelimitedJsonDialect): - serialization_settings = JsonTextConfiguration( - record_separator=formater.delimiter - ) - qq_format = QueryFormat( - type=QueryFormatType.json, - json_text_configuration=serialization_settings) - elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well - try: - headers = formater.has_header - except AttributeError: - headers = False - serialization_settings = DelimitedTextConfiguration( - column_separator=formater.delimiter, - field_quote=formater.quotechar, - record_separator=formater.lineterminator, - escape_char=formater.escapechar, - headers_present=headers - ) - qq_format = QueryFormat( - type=QueryFormatType.delimited, - delimited_text_configuration=serialization_settings - ) - elif not formater: - return None - else: - raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect.") - return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/authentication.py deleted file mode 100644 index b11dc57..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/authentication.py +++ /dev/null @@ -1,140 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . 
import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. - """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] 
- ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/__init__.py deleted file mode 100644 index 5b396cd..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/avro_io.py deleted file mode 100644 index 93a5c13..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/avro_io.py +++ /dev/null @@ -1,464 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import json -import logging -import struct -import sys - -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -STRUCT_FLOAT = struct.Struct('= 0), n - input_bytes = self.reader.read(n) - if n > 0 and not input_bytes: - raise StopIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return self.read_long() - - def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - b = ord(self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - def read_float(self): - """ - A float is written as 4 bytes. 
- The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(self.read(4))[0] - - def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(self.read(8))[0] - - def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = self.read_long() - assert (nbytes >= 0), nbytes - return self.read(nbytes) - - def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. - """ - input_bytes = self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - def skip_boolean(self): - self.skip(1) - - def skip_int(self): - self.skip_long() - - def skip_long(self): - b = ord(self.read(1)) - while (b & 0x80) != 0: - b = ord(self.read(1)) - - def skip_float(self): - self.skip(4) - - def skip_double(self): - self.skip(8) - - def skip_bytes(self): - self.skip(self.read_long()) - - def skip_utf8(self): - self.skip_bytes() - - def skip(self, n): - self.reader.seek(self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class DatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema". 
- """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - def read(self, decoder): - return self.read_data(self.writer_schema, decoder) - - def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = decoder.read_boolean() - elif writer_schema.type == 'string': - result = decoder.read_utf8() - elif writer_schema.type == 'int': - result = decoder.read_int() - elif writer_schema.type == 'long': - result = decoder.read_long() - elif writer_schema.type == 'float': - result = decoder.read_float() - elif writer_schema.type == 'double': - result = decoder.read_double() - elif writer_schema.type == 'bytes': - result = decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = decoder.skip_boolean() - elif writer_schema.type == 'string': - result = decoder.skip_utf8() - elif writer_schema.type == 'int': - result = decoder.skip_int() - elif writer_schema.type == 'long': - result = decoder.skip_long() - elif writer_schema.type == 'float': - result = decoder.skip_float() - elif writer_schema.type == 'double': - result = decoder.skip_double() - elif writer_schema.type == 'bytes': - result = decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = self.skip_enum(decoder) - elif writer_schema.type == 'array': - self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. - """ - return decoder.read(writer_schema.size) - - @staticmethod - def skip_fixed(writer_schema, decoder): - return decoder.skip(writer_schema.size) - - @staticmethod - def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. 
- """ - # read data - index_of_symbol = decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - def skip_enum(decoder): - return decoder.skip_int() - - def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - read_items.append(self.read_data(writer_schema.items, decoder)) - block_count = decoder.read_long() - return read_items - - def skip_array(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - self.skip_data(writer_schema.items, decoder) - block_count = decoder.read_long() - - def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = {} - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - key = decoder.read_utf8() - read_items[key] = self.read_data(writer_schema.values, decoder) - block_count = decoder.read_long() - return read_items - - def skip_map(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - decoder.skip_utf8() - self.skip_data(writer_schema.values, decoder) - block_count = decoder.read_long() - - def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. 
- """ - # schema resolution - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return self.read_data(selected_writer_schema, decoder) - - def skip_union(self, writer_schema, decoder): - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. - """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/avro_io_async.py deleted file mode 100644 index e981216..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/avro_io_async.py +++ /dev/null @@ -1,448 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. 
- - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import logging -import sys - -from ..avro import schema - -from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Decoder - - -class AsyncBinaryDecoder(object): - """Read leaf values.""" - - def __init__(self, reader): - """ - reader is a Python object on which we can call read, seek, and tell. - """ - self._reader = reader - - @property - def reader(self): - """Reports the reader used by this decoder.""" - return self._reader - - async def read(self, n): - """Read n bytes. - - Args: - n: Number of bytes to read. - Returns: - The next n bytes from the input. - """ - assert (n >= 0), n - input_bytes = await self.reader.read(n) - if n > 0 and not input_bytes: - raise StopAsyncIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - async def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(await self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - async def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return await self.read_long() - - async def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - b = ord(await self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(await self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - async def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(await self.read(4))[0] - - async def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(await self.read(8))[0] - - async def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = await self.read_long() - assert (nbytes >= 0), nbytes - return await self.read(nbytes) - - async def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. 
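The int/long readers above implement Avro's variable-length, zig-zag integer encoding: 7 bits per byte, high bit as a continuation flag, with the zig-zag step mapping signed values onto unsigned ones. A self-contained sketch of that decoding follows; the helper name and sample bytes are illustrative and not part of the SDK::

    def decode_zigzag_long(data):
        # Decode one Avro variable-length, zig-zag encoded integer from the
        # front of `data`; return (value, number of bytes consumed).
        n, shift = 0, 0
        for i, b in enumerate(data):
            n |= (b & 0x7F) << shift
            shift += 7
            if not (b & 0x80):             # high bit clear: last byte of the varint
                return (n >> 1) ^ -(n & 1), i + 1
        raise ValueError("truncated varint")

    # 1 encodes as b'\x02', -1 as b'\x01', 64 as b'\x80\x01'.
    assert decode_zigzag_long(b'\x02') == (1, 1)
    assert decode_zigzag_long(b'\x01') == (-1, 1)
    assert decode_zigzag_long(b'\x80\x01') == (64, 2)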
- """ - input_bytes = await self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - async def skip_boolean(self): - await self.skip(1) - - async def skip_int(self): - await self.skip_long() - - async def skip_long(self): - b = ord(await self.read(1)) - while (b & 0x80) != 0: - b = ord(await self.read(1)) - - async def skip_float(self): - await self.skip(4) - - async def skip_double(self): - await self.skip(8) - - async def skip_bytes(self): - await self.skip(await self.read_long()) - - async def skip_utf8(self): - await self.skip_bytes() - - async def skip(self, n): - await self.reader.seek(await self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class AsyncDatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema", and the schema expected by the - reader the "reader's schema". - """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - async def read(self, decoder): - return await self.read_data(self.writer_schema, decoder) - - async def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = await decoder.read_boolean() - elif writer_schema.type == 'string': - result = await decoder.read_utf8() - elif writer_schema.type == 'int': - result = await decoder.read_int() - elif writer_schema.type == 'long': - result = await decoder.read_long() - elif writer_schema.type == 'float': - result = await decoder.read_float() - elif writer_schema.type == 'double': - result = await decoder.read_double() - elif writer_schema.type == 'bytes': - result = await decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = await self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = await self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = await self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = await self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = await self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - async def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = await decoder.skip_boolean() - elif writer_schema.type == 'string': - result = await decoder.skip_utf8() - elif writer_schema.type == 'int': - result = await decoder.skip_int() - elif writer_schema.type == 'long': - result = await decoder.skip_long() - elif writer_schema.type == 
'float': - result = await decoder.skip_float() - elif writer_schema.type == 'double': - result = await decoder.skip_double() - elif writer_schema.type == 'bytes': - result = await decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = await self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.skip_enum(decoder) - elif writer_schema.type == 'array': - await self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - await self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = await self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - await self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - async def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. - """ - return await decoder.read(writer_schema.size) - - @staticmethod - async def skip_fixed(writer_schema, decoder): - return await decoder.skip(writer_schema.size) - - @staticmethod - async def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = await decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - async def skip_enum(decoder): - return await decoder.skip_int() - - async def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - read_items.append(await self.read_data(writer_schema.items, decoder)) - block_count = await decoder.read_long() - return read_items - - async def skip_array(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await self.skip_data(writer_schema.items, decoder) - block_count = await decoder.read_long() - - async def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. 
- The actual count in this case - is the absolute value of the count written. - """ - read_items = {} - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - key = await decoder.read_utf8() - read_items[key] = await self.read_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - return read_items - - async def skip_map(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await decoder.skip_utf8() - await self.skip_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - - async def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. - """ - # schema resolution - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return await self.read_data(selected_writer_schema, decoder) - - async def skip_union(self, writer_schema, decoder): - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - async def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. 
- """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = await self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - async def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - await self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/datafile.py deleted file mode 100644 index 6e5813d..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/datafile.py +++ /dev/null @@ -1,248 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import io -import logging -import sys -import zlib - -from ..avro import avro_io -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Version of the container file: -VERSION = 1 - -if PY3: - MAGIC = b'Obj' + bytes([VERSION]) - MAGIC_SIZE = len(MAGIC) -else: - MAGIC = 'Obj' + chr(VERSION) - MAGIC_SIZE = len(MAGIC) - -# Size of the synchronization marker, in number of bytes: -SYNC_SIZE = 16 - -# Schema of the container header: -META_SCHEMA = schema.parse(""" -{ - "type": "record", "name": "org.apache.avro.file.Header", - "fields": [{ - "name": "magic", - "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} - }, { - "name": "meta", - "type": {"type": "map", "values": "bytes"} - }, { - "name": "sync", - "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} - }] -} -""" % { - 'magic_size': MAGIC_SIZE, - 'sync_size': SYNC_SIZE, -}) - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null', 'deflate']) - -# Metadata key associated to the schema: -SCHEMA_KEY = "avro.schema" - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class DataFileException(schema.AvroException): - """Problem reading or writing file object containers.""" - -# ------------------------------------------------------------------------------ - - -class DataFileReader(object): - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io.BinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. 
- self._reader.seek(0, 0) - - # read the header: magic, meta, sync - self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' % self.codec) - - # get ready to read - self._block_count = 0 - - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. - if self._header_reader is not None: - self._block_count = self._reader.block_count - self._datum_decoder = self._raw_decoder - - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - - def __enter__(self): - return self - - def __exit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __iter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - header_reader.seek(0, 0) - - # read header into a dict - header = self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - def _read_block_header(self): - self._block_count = self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - elif self.codec == 'deflate': - # Compressed data is stored as (length, data), which - # corresponds to how the "bytes" type is encoded. - data = self.raw_decoder.read_bytes() - # -15 is the log of the window size; negative indicates - # "raw" (no zlib headers) decompression. See zlib.h. - uncompressed = zlib.decompress(data, -15) - self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. 
- """ - proposed_sync_marker = self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopIteration - if proposed_sync_marker != self.sync_marker: - self.reader.seek(-SYNC_SIZE, 1) - - def __next__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - self._skip_sync() - self._read_block_header() - - datum = self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - - # event_position and block_count are to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with these two attr. - if hasattr(self._reader, 'event_position'): - self.reader.block_count = self.block_count - self.reader.track_event_position() - - return datum - - # PY2 - def next(self): - return self.__next__() - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/datafile_async.py deleted file mode 100644 index 1a7324d..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/datafile_async.py +++ /dev/null @@ -1,198 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import logging -import sys - -from ..avro import avro_io_async -from ..avro import schema -from .datafile import DataFileException -from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY - - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null']) - - -class AsyncDataFileReader(object): - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else \ - avro_io_async.AsyncBinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - self.codec = "null" - self._block_count = 0 - self._meta = None - self._sync_marker = None - - async def init(self): - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - await self._reader.seek(0, 0) - - # read the header: magic, meta, sync - await self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' % self.codec) - - # get ready to read - self._block_count = 0 - - # header_reader indicates reader only has partial content. 
The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. - if self._header_reader is not None: - self._block_count = self._reader.block_count - self._datum_decoder = self._raw_decoder - - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - return self - - async def __aenter__(self): - return self - - async def __aexit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __aiter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - async def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - await header_reader.seek(0, 0) - - # read header into a dict - header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - async def _read_block_header(self): - self._block_count = await self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - await self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - async def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = await self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopAsyncIteration - if proposed_sync_marker != self.sync_marker: - await self.reader.seek(-SYNC_SIZE, 1) - - async def __anext__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - await self._skip_sync() - await self._read_block_header() - - datum = await self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - - # event_position and block_count are to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with these two attr. 
- if hasattr(self._reader, 'event_position'): - self.reader.block_count = self.block_count - await self.reader.track_event_position() - - return datum - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/schema.py deleted file mode 100644 index ffe2853..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/avro/schema.py +++ /dev/null @@ -1,1221 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines - -"""Representation of Avro schemas. - -A schema may be one of: - - A record, mapping field names to field value data; - - An error, equivalent to a record; - - An enum, containing one of a small set of symbols; - - An array of values, all of the same schema; - - A map containing string/value pairs, each of a declared schema; - - A union of other schemas; - - A fixed sized binary object; - - A unicode string; - - A sequence of bytes; - - A 32-bit signed int; - - A 64-bit signed long; - - A 32-bit floating-point float; - - A 64-bit floating-point double; - - A boolean; - - Null. -""" - -import abc -import json -import logging -import re -import sys -from six import with_metaclass - -PY2 = sys.version_info[0] == 2 - -if PY2: - _str = unicode # pylint: disable=undefined-variable -else: - _str = str - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Log level more verbose than DEBUG=10, INFO=20, etc. -DEBUG_VERBOSE = 5 - -NULL = 'null' -BOOLEAN = 'boolean' -STRING = 'string' -BYTES = 'bytes' -INT = 'int' -LONG = 'long' -FLOAT = 'float' -DOUBLE = 'double' -FIXED = 'fixed' -ENUM = 'enum' -RECORD = 'record' -ERROR = 'error' -ARRAY = 'array' -MAP = 'map' -UNION = 'union' - -# Request and error unions are part of Avro protocols: -REQUEST = 'request' -ERROR_UNION = 'error_union' - -PRIMITIVE_TYPES = frozenset([ - NULL, - BOOLEAN, - STRING, - BYTES, - INT, - LONG, - FLOAT, - DOUBLE, -]) - -NAMED_TYPES = frozenset([ - FIXED, - ENUM, - RECORD, - ERROR, -]) - -VALID_TYPES = frozenset.union( - PRIMITIVE_TYPES, - NAMED_TYPES, - [ - ARRAY, - MAP, - UNION, - REQUEST, - ERROR_UNION, - ], -) - -SCHEMA_RESERVED_PROPS = frozenset([ - 'type', - 'name', - 'namespace', - 'fields', # Record - 'items', # Array - 'size', # Fixed - 'symbols', # Enum - 'values', # Map - 'doc', -]) - -FIELD_RESERVED_PROPS = frozenset([ - 'default', - 'name', - 'doc', - 'order', - 'type', -]) - -VALID_FIELD_SORT_ORDERS = frozenset([ - 'ascending', - 'descending', - 'ignore', -]) - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class Error(Exception): - """Base class for errors in this module.""" - - -class AvroException(Error): - """Generic Avro schema error.""" - - -class SchemaParseException(AvroException): - """Error while parsing a JSON schema descriptor.""" - - -class Schema(with_metaclass(abc.ABCMeta, object)): - """Abstract base class for all Schema classes.""" - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object. 
- - Args: - data_type: Type of the schema to initialize. - other_props: Optional dictionary of additional properties. - """ - if data_type not in VALID_TYPES: - raise SchemaParseException('%r is not a valid Avro type.' % data_type) - - # All properties of this schema, as a map: property name -> property value - self._props = {} - - self._props['type'] = data_type - self._type = data_type - - if other_props: - self._props.update(other_props) - - @property - def namespace(self): - """Returns: the namespace this schema belongs to, if any, or None.""" - return self._props.get('namespace', None) - - @property - def type(self): - """Returns: the type of this schema.""" - return self._type - - @property - def doc(self): - """Returns: the documentation associated to this schema, if any, or None.""" - return self._props.get('doc', None) - - @property - def props(self): - """Reports all the properties of this schema. - - Includes all properties, reserved and non reserved. - JSON properties of this schema are directly generated from this dict. - - Returns: - A dictionary of properties associated to this schema. - """ - return self._props - - @property - def other_props(self): - """Returns: the dictionary of non-reserved properties.""" - return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) - - def __str__(self): - """Returns: the JSON representation of this schema.""" - return json.dumps(self.to_json(names=None)) - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. - """ - raise Exception('Cannot run abstract method.') - - -# ------------------------------------------------------------------------------ - - -_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') - -_RE_FULL_NAME = re.compile( - r'^' - r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace - r'([A-Za-z_][A-Za-z0-9_]*)' # name - r'$' -) - - -class Name(object): - """Representation of an Avro name.""" - - def __init__(self, name, namespace=None): - """Parses an Avro name. - - Args: - name: Avro name to parse (relative or absolute). - namespace: Optional explicit namespace if the name is relative. - """ - # Normalize: namespace is always defined as a string, possibly empty. - if namespace is None: - namespace = '' - - if '.' in name: - # name is absolute, namespace is ignored: - self._fullname = name - - match = _RE_FULL_NAME.match(self._fullname) - if match is None: - raise SchemaParseException( - 'Invalid absolute schema name: %r.' % self._fullname) - - self._name = match.group(1) - self._namespace = self._fullname[:-(len(self._name) + 1)] - - else: - # name is relative, combine with explicit namespace: - self._name = name - self._namespace = namespace - self._fullname = (self._name - if (not self._namespace) else - '%s.%s' % (self._namespace, self._name)) - - # Validate the fullname: - if _RE_FULL_NAME.match(self._fullname) is None: - raise SchemaParseException( - 'Invalid schema name %r infered from name %r and namespace %r.' 
- % (self._fullname, self._name, self._namespace)) - - def __eq__(self, other): - if not isinstance(other, Name): - return NotImplemented - return self.fullname == other.fullname - - @property - def simple_name(self): - """Returns: the simple name part of this name.""" - return self._name - - @property - def namespace(self): - """Returns: this name's namespace, possible the empty string.""" - return self._namespace - - @property - def fullname(self): - """Returns: the full name.""" - return self._fullname - - -# ------------------------------------------------------------------------------ - - -class Names(object): - """Tracks Avro named schemas and default namespace during parsing.""" - - def __init__(self, default_namespace=None, names=None): - """Initializes a new name tracker. - - Args: - default_namespace: Optional default namespace. - names: Optional initial mapping of known named schemas. - """ - if names is None: - names = {} - self._names = names - self._default_namespace = default_namespace - - @property - def names(self): - """Returns: the mapping of known named schemas.""" - return self._names - - @property - def default_namespace(self): - """Returns: the default namespace, if any, or None.""" - return self._default_namespace - - def new_with_default_namespace(self, namespace): - """Creates a new name tracker from this tracker, but with a new default ns. - - Args: - namespace: New default namespace to use. - Returns: - New name tracker with the specified default namespace. - """ - return Names(names=self._names, default_namespace=namespace) - - def get_name(self, name, namespace=None): - """Resolves the Avro name according to this name tracker's state. - - Args: - name: Name to resolve (absolute or relative). - namespace: Optional explicit namespace. - Returns: - The specified name, resolved according to this tracker. - """ - if namespace is None: - namespace = self._default_namespace - return Name(name=name, namespace=namespace) - - def get_schema(self, name, namespace=None): - """Resolves an Avro schema by name. - - Args: - name: Name (relative or absolute) of the Avro schema to look up. - namespace: Optional explicit namespace. - Returns: - The schema with the specified name, if any, or None. - """ - avro_name = self.get_name(name=name, namespace=namespace) - return self._names.get(avro_name.fullname, None) - - def prune_namespace(self, properties): - """given a properties, return properties with namespace removed if - it matches the own default namespace - """ - if self.default_namespace is None: - # I have no default -- no change - return properties - if 'namespace' not in properties: - # he has no namespace - no change - return properties - if properties['namespace'] != self.default_namespace: - # we're different - leave his stuff alone - return properties - # we each have a namespace and it's redundant. delete his. - prunable = properties.copy() - del prunable['namespace'] - return prunable - - def register(self, schema): - """Registers a new named schema in this tracker. - - Args: - schema: Named Avro schema to register in this tracker. - """ - if schema.fullname in VALID_TYPES: - raise SchemaParseException( - '%s is a reserved type name.' % schema.fullname) - if schema.fullname in self.names: - raise SchemaParseException( - 'Avro name %r already exists.' 
% schema.fullname) - - logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname) - self._names[schema.fullname] = schema - - -# ------------------------------------------------------------------------------ - - -class NamedSchema(Schema): - """Abstract base class for named schemas. - - Named schemas are enumerated in NAMED_TYPES. - """ - - def __init__( - self, - data_type, - name=None, - namespace=None, - names=None, - other_props=None, - ): - """Initializes a new named schema object. - - Args: - data_type: Type of the named schema. - name: Name (absolute or relative) of the schema. - namespace: Optional explicit namespace if name is relative. - names: Tracker to resolve and register Avro names. - other_props: Optional map of additional properties of the schema. - """ - assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type) - self._avro_name = names.get_name(name=name, namespace=namespace) - - super(NamedSchema, self).__init__(data_type, other_props) - - names.register(self) - - self._props['name'] = self.name - if self.namespace: - self._props['namespace'] = self.namespace - - @property - def avro_name(self): - """Returns: the Name object describing this schema's name.""" - return self._avro_name - - @property - def name(self): - return self._avro_name.simple_name - - @property - def namespace(self): - return self._avro_name.namespace - - @property - def fullname(self): - return self._avro_name.fullname - - def name_ref(self, names): - """Reports this schema name relative to the specified name tracker. - - Args: - names: Avro name tracker to relativise this schema name against. - Returns: - This schema name, relativised against the specified name tracker. - """ - if self.namespace == names.default_namespace: - return self.name - return self.fullname - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. - """ - raise Exception('Cannot run abstract method.') - -# ------------------------------------------------------------------------------ - - -_NO_DEFAULT = object() - - -class Field(object): - """Representation of the schema of a field in a record.""" - - def __init__( - self, - data_type, - name, - index, - has_default, - default=_NO_DEFAULT, - order=None, - doc=None, - other_props=None - ): - """Initializes a new Field object. - - Args: - data_type: Avro schema of the field. - name: Name of the field. - index: 0-based position of the field. - has_default: - default: - order: - doc: - other_props: - """ - if (not isinstance(name, _str)) or (not name): - raise SchemaParseException('Invalid record field name: %r.' % name) - if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): - raise SchemaParseException('Invalid record field order: %r.' 
% order) - - # All properties of this record field: - self._props = {} - - self._has_default = has_default - if other_props: - self._props.update(other_props) - - self._index = index - self._type = self._props['type'] = data_type - self._name = self._props['name'] = name - - if has_default: - self._props['default'] = default - - if order is not None: - self._props['order'] = order - - if doc is not None: - self._props['doc'] = doc - - @property - def type(self): - """Returns: the schema of this field.""" - return self._type - - @property - def name(self): - """Returns: this field name.""" - return self._name - - @property - def index(self): - """Returns: the 0-based index of this field in the record.""" - return self._index - - @property - def default(self): - return self._props['default'] - - @property - def has_default(self): - return self._has_default - - @property - def order(self): - return self._props.get('order', None) - - @property - def doc(self): - return self._props.get('doc', None) - - @property - def props(self): - return self._props - - @property - def other_props(self): - return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) - - def __str__(self): - return json.dumps(self.to_json()) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['type'] = self.type.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Primitive Types - - -class PrimitiveSchema(Schema): - """Schema of a primitive Avro type. - - Valid primitive types are defined in PRIMITIVE_TYPES. - """ - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object for the specified primitive type. - - Args: - data_type: Type of the schema to construct. Must be primitive. - """ - if data_type not in PRIMITIVE_TYPES: - raise AvroException('%r is not a valid primitive type.' % data_type) - super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) - - @property - def name(self): - """Returns: the simple name of this schema.""" - # The name of a primitive type is the type itself. - return self.type - - @property - def fullname(self): - """Returns: the fully qualified name of this schema.""" - # The full name is the simple name for primitive schema. - return self.name - - def to_json(self, names=None): - if len(self.props) == 1: - return self.fullname - return self.props - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (non-recursive) - - -class FixedSchema(NamedSchema): - def __init__( - self, - name, - namespace, - size, - names=None, - other_props=None, - ): - # Ensure valid ctor args - if not isinstance(size, int): - fail_msg = 'Fixed Schema requires a valid integer for size property.' 
- raise AvroException(fail_msg) - - super(FixedSchema, self).__init__( - data_type=FIXED, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - self._props['size'] = size - - @property - def size(self): - """Returns: the size of this fixed schema, in bytes.""" - return self._props['size'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ - - -class EnumSchema(NamedSchema): - def __init__( - self, - name, - namespace, - symbols, - names=None, - doc=None, - other_props=None, - ): - """Initializes a new enumeration schema object. - - Args: - name: Simple name of this enumeration. - namespace: Optional namespace. - symbols: Ordered list of symbols defined in this enumeration. - names: - doc: - other_props: - """ - symbols = tuple(symbols) - symbol_set = frozenset(symbols) - if (len(symbol_set) != len(symbols) - or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): - raise AvroException( - 'Invalid symbols for enum schema: %r.' % (symbols,)) - - super(EnumSchema, self).__init__( - data_type=ENUM, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - - self._props['symbols'] = symbols - if doc is not None: - self._props['doc'] = doc - - @property - def symbols(self): - """Returns: the symbols defined in this enum.""" - return self._props['symbols'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (recursive) - - -class ArraySchema(Schema): - """Schema of an array.""" - - def __init__(self, items, other_props=None): - """Initializes a new array schema object. - - Args: - items: Avro schema of the array items. - other_props: - """ - super(ArraySchema, self).__init__( - data_type=ARRAY, - other_props=other_props, - ) - self._items_schema = items - self._props['items'] = items - - @property - def items(self): - """Returns: the schema of the items in this array.""" - return self._items_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - item_schema = self.items - to_dump['items'] = item_schema.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class MapSchema(Schema): - """Schema of a map.""" - - def __init__(self, values, other_props=None): - """Initializes a new map schema object. - - Args: - values: Avro schema of the map values. 
- other_props: - """ - super(MapSchema, self).__init__( - data_type=MAP, - other_props=other_props, - ) - self._values_schema = values - self._props['values'] = values - - @property - def values(self): - """Returns: the schema of the values in this map.""" - return self._values_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['values'] = self.values.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class UnionSchema(Schema): - """Schema of a union.""" - - def __init__(self, schemas): - """Initializes a new union schema object. - - Args: - schemas: Ordered collection of schema branches in the union. - """ - super(UnionSchema, self).__init__(data_type=UNION) - self._schemas = tuple(schemas) - - # Validate the schema branches: - - # All named schema names are unique: - named_branches = tuple( - filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) - unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) - if len(unique_names) != len(named_branches): - raise AvroException( - 'Invalid union branches with duplicate schema name:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - # Types are unique within unnamed schemas, and union is not allowed: - unnamed_branches = tuple( - filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) - unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) - if UNION in unique_types: - raise AvroException( - 'Invalid union branches contain other unions:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - if len(unique_types) != len(unnamed_branches): - raise AvroException( - 'Invalid union branches with duplicate type:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - @property - def schemas(self): - """Returns: the ordered list of schema branches in the union.""" - return self._schemas - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - to_dump.append(schema.to_json(names)) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class ErrorUnionSchema(UnionSchema): - """Schema representing the declared errors of a protocol message.""" - - def __init__(self, schemas): - """Initializes an error-union schema. - - Args: - schema: collection of error schema. - """ - # Prepend "string" to handle system errors - schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas) - super(ErrorUnionSchema, self).__init__(schemas=schemas) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - # Don't print the system error schema - if schema.type == STRING: - continue - to_dump.append(schema.to_json(names)) - return to_dump - - -# ------------------------------------------------------------------------------ - - -class RecordSchema(NamedSchema): - """Schema of a record.""" - - @staticmethod - def _make_field(index, field_desc, names): - """Builds field schemas from a list of field JSON descriptors. - - Args: - index: 0-based index of the field in the record. 
- field_desc: JSON descriptors of a record field. - Return: - The field schema. - """ - field_schema = schema_from_json_data( - json_data=field_desc['type'], - names=names, - ) - other_props = ( - dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS))) - return Field( - data_type=field_schema, - name=field_desc['name'], - index=index, - has_default=('default' in field_desc), - default=field_desc.get('default', _NO_DEFAULT), - order=field_desc.get('order', None), - doc=field_desc.get('doc', None), - other_props=other_props, - ) - - @staticmethod - def make_field_list(field_desc_list, names): - """Builds field schemas from a list of field JSON descriptors. - - Guarantees field name unicity. - - Args: - field_desc_list: collection of field JSON descriptors. - names: Avro schema tracker. - Yields - Field schemas. - """ - for index, field_desc in enumerate(field_desc_list): - yield RecordSchema._make_field(index, field_desc, names) - - @staticmethod - def _make_field_map(fields): - """Builds the field map. - - Guarantees field name unicity. - - Args: - fields: iterable of field schema. - Returns: - A map of field schemas, indexed by name. - """ - field_map = {} - for field in fields: - if field.name in field_map: - raise SchemaParseException( - 'Duplicate record field name %r.' % field.name) - field_map[field.name] = field - return field_map - - def __init__( - self, - name, - namespace, - fields=None, - make_fields=None, - names=None, - record_type=RECORD, - doc=None, - other_props=None - ): - """Initializes a new record schema object. - - Args: - name: Name of the record (absolute or relative). - namespace: Optional namespace the record belongs to, if name is relative. - fields: collection of fields to add to this record. - Exactly one of fields or make_fields must be specified. - make_fields: function creating the fields that belong to the record. - The function signature is: make_fields(names) -> ordered field list. - Exactly one of fields or make_fields must be specified. - names: - record_type: Type of the record: one of RECORD, ERROR or REQUEST. - Protocol requests are not named. - doc: - other_props: - """ - if record_type == REQUEST: - # Protocol requests are not named: - super(RecordSchema, self).__init__( - data_type=REQUEST, - other_props=other_props, - ) - elif record_type in [RECORD, ERROR]: - # Register this record name in the tracker: - super(RecordSchema, self).__init__( - data_type=record_type, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - else: - raise SchemaParseException( - 'Invalid record type: %r.' 
% record_type) - - if record_type in [RECORD, ERROR]: - avro_name = names.get_name(name=name, namespace=namespace) - nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) - elif record_type == REQUEST: - # Protocol request has no name: no need to change default namespace: - nested_names = names - - if fields is None: - fields = make_fields(names=nested_names) - else: - assert make_fields is None - self._fields = tuple(fields) - - self._field_map = RecordSchema._make_field_map(self._fields) - - self._props['fields'] = fields - if doc is not None: - self._props['doc'] = doc - - @property - def fields(self): - """Returns: the field schemas, as an ordered tuple.""" - return self._fields - - @property - def field_map(self): - """Returns: a read-only map of the field schemas index by field names.""" - return self._field_map - - def to_json(self, names=None): - if names is None: - names = Names() - # Request records don't have names - if self.type == REQUEST: - return [f.to_json(names) for f in self.fields] - - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - - to_dump = names.prune_namespace(self.props.copy()) - to_dump['fields'] = [f.to_json(names) for f in self.fields] - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Module functions - - -def filter_keys_out(items, keys): - """Filters a collection of (key, value) items. - - Exclude any item whose key belongs to keys. - - Args: - items: Dictionary of items to filter the keys out of. - keys: Keys to filter out. - Yields: - Filtered items. - """ - for key, value in items.items(): - if key in keys: - continue - yield key, value - - -# ------------------------------------------------------------------------------ - - -def _schema_from_json_string(json_string, names): - if json_string in PRIMITIVE_TYPES: - return PrimitiveSchema(data_type=json_string) - - # Look for a known named schema: - schema = names.get_schema(name=json_string) - if schema is None: - raise SchemaParseException( - 'Unknown named schema %r, known names: %r.' 
- % (json_string, sorted(names.names))) - return schema - - -def _schema_from_json_array(json_array, names): - def MakeSchema(desc): - return schema_from_json_data(json_data=desc, names=names) - - return UnionSchema(map(MakeSchema, json_array)) - - -def _schema_from_json_object(json_object, names): - data_type = json_object.get('type') - if data_type is None: - raise SchemaParseException( - 'Avro schema JSON descriptor has no "type" property: %r' % json_object) - - other_props = dict( - filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) - - if data_type in PRIMITIVE_TYPES: - # FIXME should not ignore other properties - result = PrimitiveSchema(data_type, other_props=other_props) - - elif data_type in NAMED_TYPES: - name = json_object.get('name') - namespace = json_object.get('namespace', names.default_namespace) - if data_type == FIXED: - size = json_object.get('size') - result = FixedSchema(name, namespace, size, names, other_props) - elif data_type == ENUM: - symbols = json_object.get('symbols') - doc = json_object.get('doc') - result = EnumSchema(name, namespace, symbols, names, doc, other_props) - - elif data_type in [RECORD, ERROR]: - field_desc_list = json_object.get('fields', ()) - - def MakeFields(names): - return tuple(RecordSchema.make_field_list(field_desc_list, names)) - - result = RecordSchema( - name=name, - namespace=namespace, - make_fields=MakeFields, - names=names, - record_type=data_type, - doc=json_object.get('doc'), - other_props=other_props, - ) - else: - raise Exception('Internal error: unknown type %r.' % data_type) - - elif data_type in VALID_TYPES: - # Unnamed, non-primitive Avro type: - - if data_type == ARRAY: - items_desc = json_object.get('items') - if items_desc is None: - raise SchemaParseException( - 'Invalid array schema descriptor with no "items" : %r.' - % json_object) - result = ArraySchema( - items=schema_from_json_data(items_desc, names), - other_props=other_props, - ) - - elif data_type == MAP: - values_desc = json_object.get('values') - if values_desc is None: - raise SchemaParseException( - 'Invalid map schema descriptor with no "values" : %r.' - % json_object) - result = MapSchema( - values=schema_from_json_data(values_desc, names=names), - other_props=other_props, - ) - - elif data_type == ERROR_UNION: - error_desc_list = json_object.get('declared_errors') - assert error_desc_list is not None - error_schemas = map( - lambda desc: schema_from_json_data(desc, names=names), - error_desc_list) - result = ErrorUnionSchema(schemas=error_schemas) - - else: - raise Exception('Internal error: unknown type %r.' % data_type) - else: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r' % json_object) - return result - - -# Parsers for the JSON data types: -_JSONDataParserTypeMap = { - _str: _schema_from_json_string, - list: _schema_from_json_array, - dict: _schema_from_json_object, -} - - -def schema_from_json_data(json_data, names=None): - """Builds an Avro Schema from its JSON descriptor. - - Args: - json_data: JSON data representing the descriptor of the Avro schema. - names: Optional tracker for Avro named schemas. - Returns: - The Avro schema parsed from the JSON descriptor. - Raises: - SchemaParseException: if the descriptor is invalid. - """ - if names is None: - names = Names() - - # Select the appropriate parser based on the JSON data type: - parser = _JSONDataParserTypeMap.get(type(json_data)) - if parser is None: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) - return parser(json_data, names=names) - - -# ------------------------------------------------------------------------------ - - -def parse(json_string): - """Constructs a Schema from its JSON descriptor in text form. - - Args: - json_string: String representation of the JSON descriptor of the schema. - Returns: - The parsed schema. - Raises: - SchemaParseException: on JSON parsing error, - or if the JSON descriptor is invalid. - """ - try: - json_data = json.loads(json_string) - except Exception as exn: - raise SchemaParseException( - 'Error parsing schema from JSON: %r. ' - 'Error message: %r.' - % (json_string, exn)) - - # Initialize the names object - names = Names() - - # construct the Avro Schema object - return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client.py deleted file mode 100644 index 361931a..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client.py +++ /dev/null @@ -1,443 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, - Optional, - Any, - Iterable, - Dict, - List, - Type, - Tuple, - TYPE_CHECKING, -) -import logging - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/?comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except StorageErrorException as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} - except KeyError: - credential = conn_settings.get("SharedAccessSignature") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DefaultEndpointsProtocol"], - conn_settings["AccountName"], - service, - conn_settings["EndpointSuffix"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["AccountName"], service, 
conn_settings["EndpointSuffix"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client_async.py deleted file mode 100644 index 1fec883..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/base_client_async.py +++ /dev/null @@ -1,185 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/?comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('sas', None), - kwargs.pop('timeout', None) - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except StorageErrorException as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/constants.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/constants.py deleted file mode 100644 index f67ea29..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys -from .._generated.version import VERSION - - -X_MS_VERSION = VERSION - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds - # the 80000 seconds was calculated with: - # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 80000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/models.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/models.py deleted file mode 100644 index 27a9c9f..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/models.py +++ /dev/null @@ -1,466 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = 
"ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - 
snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def 
has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. - - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. 
- :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. - """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.blob.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - parsed._str = permission # pylint: disable = protected-access - return parsed - -class Services(object): - """Specifies the services accessible with the account SAS. 
- - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies.py deleted file mode 100644 index c9bc798..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/request_handlers.py deleted file mode 100644 index 4f15b65..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/request_handlers.py +++ /dev/null @@ -1,147 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads.py deleted file mode 100644 index abf3fb2..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads.py +++ /dev/null @@ -1,550 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - 
self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, block_id, block_stream): - try: - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. 
- if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads_async.py deleted file mode 100644 index fe68a2b..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/uploads_async.py +++ /dev/null @@ -1,350 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, block_id, block_stream): - range_id = await self._upload_substream_block(block_id, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, block_id, block_stream): - try: - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - 
chunk_end = chunk_offset + len(chunk_data) - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - chunk_end, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2019_12_12/_shared_access_signature.py deleted file mode 100644 index b95f890..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_shared_access_signature.py +++ /dev/null @@ -1,584 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from ._shared import sign_string, url_quote -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ - QueryStringConstants - -if TYPE_CHECKING: - from datetime import datetime - from .import ( - ResourceTypes, - AccountSasPermissions, - UserDelegationKey, - ContainerSasPermissions, - BlobSasPermissions - ) - - -class BlobQueryStringConstants(object): - SIGNED_TIMESTAMP = 'snapshot' - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key=None, user_delegation_key=None): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: - Instead of an account key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling get_user_delegation_key on any Blob service object. - ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - self.user_delegation_key = user_delegation_key - - def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, - expiry=None, start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the blob or one of its snapshots. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to grant permission. - :param BlobSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. 
- Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = container_name + '/' + blob_name - - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - - resource = 'bs' if snapshot else 'b' - resource = 'bv' if version_id else resource - sas.add_resource(resource) - - sas.add_timestamp(snapshot or version_id) - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, resource_path, - user_delegation_key=self.user_delegation_key) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. 
- :param ContainerSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, container_name, - user_delegation_key=self.user_delegation_key) - return sas.get_token() - - -class _BlobSharedAccessHelper(_SharedAccessHelper): - - def add_timestamp(self, timestamp): - self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) - - def get_value_to_append(self, query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): - # pylint: disable = no-member - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/blob/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. - string_to_sign = \ - (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource) - - if user_delegation_key is not None: - self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) - self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) - self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) - self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) - self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) - self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_TID) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION)) - else: - string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + - self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + - self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + - self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key if user_delegation_key is None else user_delegation_key.value, - string_to_sign)) - - def get_token(self): - # a conscious decision was made to exclude the timestamp in the generated token - # this 
is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp - exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] - return '&'.join(['{0}={1}'.format(n, url_quote(v)) - for n, v in self.query_dict.items() if v is not None and n not in exclude]) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the blob service. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.blob.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_sas_token] - :end-before: [END create_sas_token] - :language: python - :dedent: 8 - :caption: Generating a shared access signature. 
- """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(blob=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_container_sas( - account_name, # type: str - container_name, # type: str - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[ContainerSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a container. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. 
- For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 12 - :caption: Generating a sas token. - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_container( - container_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_blob_sas( - account_name, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[BlobSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a blob. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str blob_name: - The name of the blob. - :param str snapshot: - An optional blob snapshot ID. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. 
- Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.BlobSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str version_id: - An optional blob version ID. This parameter is only for versioning enabled account - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - version_id = kwargs.pop('version_id', None) - if version_id and snapshot: - raise ValueError("snapshot and version_id cannot be set at the same time.") - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_blob( - container_name, - blob_name, - snapshot=snapshot, - version_id=version_id, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2019_12_12/_upload_helpers.py deleted file mode 100644 index bd59362..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_upload_helpers.py +++ /dev/null @@ -1,291 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceExistsError, ResourceModifiedError - -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from ._shared.models import StorageErrorCode -from ._shared.uploads import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from ._shared.encryption import generate_blob_encryption_data, encrypt_blob -from ._generated.models import ( - StorageErrorException, - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' 
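The removed ``_upload_helpers.py`` implements the ``overwrite`` semantics on top of these uploaders: when ``overwrite`` is false and the caller supplied no access conditions, the helpers set ``if_none_match='*'`` so the service refuses the request if the blob already exists, and ``_convert_mod_error`` then rewrites the resulting ``ResourceModifiedError`` as a ``ResourceExistsError``. A condensed sketch of that pattern (the ``client`` parameter and the ``upload_if_absent`` name are illustrative, not part of the removed module)::

    from azure.core.exceptions import ResourceExistsError, ResourceModifiedError

    def upload_if_absent(client, data, overwrite, modified_access_conditions):
        # Without explicit conditions, overwrite=False becomes an atomic
        # "create only if absent" request via the If-None-Match: * header.
        if not overwrite and modified_access_conditions is not None:
            modified_access_conditions.if_none_match = '*'
        try:
            # 'client.upload' stands in for the generated operations call
            # used by the removed helper.
            return client.upload(
                data, modified_access_conditions=modified_access_conditions)
        except ResourceModifiedError as error:
            if overwrite:
                raise
            # Mirrors _convert_mod_error: report the precondition failure
            # as "the specified blob already exists".
            raise ResourceExistsError(
                message="The specified blob already exists.",
                response=error.response)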
- - -def _convert_mod_error(error): - message = error.message.replace( - "The condition specified using HTTP conditional header(s) is not met.", - "The specified blob already exists.") - message = message.replace("ConditionNotMet", "BlobAlreadyExists") - overwrite_error = ResourceExistsError( - message=message, - response=error.response, - error=error) - overwrite_error.error_code = StorageErrorCode.blob_already_exists - raise overwrite_error - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - # Do single put if the size is smaller than or equal config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return client.upload( - data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs - ) - else: - block_ids = upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - 
block_lookup.latest = block_ids - return client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. " - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs) - - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # 
attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/__init__.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/__init__.py deleted file mode 100644 index 247f39e..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/__init__.py +++ /dev/null @@ -1,137 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os - -from .._models import BlobType -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._blob_client_async import BlobClient -from ._container_client_async import ContainerClient -from ._blob_service_client_async import BlobServiceClient -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -async def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -async def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = await client.download_blob(**kwargs) - await stream.readinto(handle) - - -async def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
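A minimal usage sketch for the ``upload_blob_to_url`` and ``download_blob_from_url`` helpers documented above; the SAS URL and local file name are placeholders, and the import path assumes the pre-removal ``v2019_12_12`` layout shown in this diff.

.. code-block:: python

    import asyncio
    from azure.multiapi.storagev2.blob.v2019_12_12.aio import (
        upload_blob_to_url,
        download_blob_from_url,
    )

    # Placeholder SAS URL; any URL with a valid SAS token would work here.
    SAS_URL = "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas-token>"

    async def main():
        # Upload bytes as a block blob, overwriting any existing data.
        props = await upload_blob_to_url(SAS_URL, b"hello world", overwrite=True)
        print(props.get("etag"))

        # Download the same blob to a local file; without overwrite=True this
        # raises ValueError if the file already exists.
        await download_blob_from_url(SAS_URL, "myblob.bin", overwrite=True)

    asyncio.run(main())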
- :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - await _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - await _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_client_async.py deleted file mode 100644 index 3275d02..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_client_async.py +++ /dev/null @@ -1,2307 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import get_page_ranges_result, parse_tags -from .._serialize import get_modify_conditions, get_api_version -from .._generated import VERSION -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageErrorException, CpkInfo -from .._deserialize import deserialize_blob_properties -from .._blob_client import BlobClient as BlobClientBase -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from .._models import BlobType, BlobBlock, BlobProperties -from .._lease import get_access_conditions -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - - -class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. 
This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobClient, self).__init__( - account_url, - container_name=container_name, - blob_name=blob_name, - snapshot=snapshot, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_account_information(self, **kwargs): # type: ignore - # type: (Optional[int]) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). 
- :rtype: dict(str, str) - """ - try: - return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob( - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. - Required if the blob has an active lease. - :paramtype: ~azure.storage.blob.aio.BlobLeaseClient - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 16 - :caption: Upload a blob to the container. 
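A short sketch of ``BlobClient.upload_blob`` from the asynchronous client above, assuming a placeholder SAS blob URL and payload.

.. code-block:: python

    import asyncio
    from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient

    async def main():
        # from_blob_url accepts a full blob URL, optionally carrying a SAS token.
        async with BlobClient.from_blob_url(
                "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas-token>") as blob:
            result = await blob.upload_blob(
                b"example payload",
                overwrite=True,
                metadata={"source": "docs-sketch"},
            )
            # Returns the blob-updated property dict (ETag and last modified).
            print(result.get("etag"), result.get("last_modified"))

    asyncio.run(main())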
- """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return await upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return await upload_page_blob(**options) - return await upload_append_blob(**options) - - @distributed_trace_async - async def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 16 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - downloader = StorageStreamDownloader(**options) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_blob(self, delete_snapshots=False, **kwargs): - # type: (bool, Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 16 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - await self._client.blob.delete(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_blob(self, **kwargs): - # type: (Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 12 - :caption: Undeleting a blob. - """ - try: - await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_blob_properties(self, **kwargs): - # type: (Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
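A sketch of the download / delete / undelete round trip described above; the account URL, container, blob name, and credential are placeholders.

.. code-block:: python

    import asyncio
    from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient

    async def main():
        blob = BlobClient(
            "https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            blob_name="myblob",
            credential="<account-key-or-sas>",
        )
        async with blob:
            # download_blob returns a StorageStreamDownloader; readall() buffers
            # the whole blob into memory.
            downloader = await blob.download_blob()
            data = await downloader.readall()
            print(len(data), "bytes downloaded")

            await blob.delete_blob(delete_snapshots="include")
            # Restoring only succeeds if a delete retention policy is enabled
            # and the retention window has not elapsed.
            await blob.undelete_blob()

    asyncio.run(main())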
- :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 12 - :caption: Getting the properties for a blob. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - blob_props = await self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - blob_props.name = self.blob_name - blob_props.container = self.container_name - return blob_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return await self._client.blob.set_http_headers(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
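A sketch of ``set_http_headers`` and ``set_blob_metadata``; it assumes ``ContentSettings`` is exported from the ``v2019_12_12`` package root, mirroring ``azure.storage.blob``.

.. code-block:: python

    import asyncio
    from azure.multiapi.storagev2.blob.v2019_12_12 import ContentSettings
    from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient

    async def main():
        async with BlobClient.from_blob_url(
                "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas-token>") as blob:
            # Setting any one property overrides all HTTP properties on the blob.
            await blob.set_http_headers(
                ContentSettings(content_type="application/json", cache_control="no-cache"))
            # Each call replaces all existing metadata with the supplied pairs.
            await blob.set_blob_metadata({"reviewed": "true"})

    asyncio.run(main())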
- :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.set_metadata(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
- Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return await self._client.page_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
- Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.append_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 12 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.create_snapshot(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, Any) -> Any - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. 
If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Copy a blob from a URL. - """ - options = self._start_copy_from_url_options( - source_url, - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return await self._client.page_blob.copy_incremental(**options) - return await self._client.blob.start_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Abort copying a blob from URL. 
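A sketch of the asynchronous copy flow documented above, using ``start_copy_from_url`` and ``abort_copy``; both URLs are placeholders.

.. code-block:: python

    import asyncio
    from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient

    # Placeholder source; must be public or carry its own SAS token.
    SOURCE_URL = "https://otheraccount.blob.core.windows.net/mycontainer/myblob?<sas-token>"

    async def main():
        async with BlobClient.from_blob_url(
                "https://myaccount.blob.core.windows.net/mycontainer/copy-target?<sas-token>") as dest:
            # Returns a dict of copy properties (etag, last_modified, copy_id, copy_status).
            copy = await dest.start_copy_from_url(SOURCE_URL)
            # abort_copy only succeeds while the copy operation is still pending.
            if copy["copy_status"] == "pending":
                await dest.abort_copy(copy["copy_id"])

    asyncio.run(main())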
- """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - await self._client.blob.abort_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. 
The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=standard_blob_tier, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. 
versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return await self._client.block_blob.stage_block(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - options = self._stage_block_from_url_options( - block_id, - source_url, - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return await self._client.block_blob.stage_block_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
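The block-staging methods above can be exercised with a sketch along these lines. Block IDs are Base64 strings that must all be the same length within a blob, and the source URL for the from-URL variant is a placeholder::

    import asyncio
    import base64

    from azure.storage.blob.aio import BlobServiceClient

    def make_block_id(index):
        # Fixed-width, Base64-encoded IDs keep every ID the same length.
        return base64.b64encode(f"block-{index:06d}".encode()).decode()

    async def stage_blocks():
        async with BlobServiceClient("https://<account>.blob.core.windows.net",
                                     credential="<credential>") as service:
            blob = service.get_blob_client("mycontainer", "staged-blob")
            await blob.stage_block(make_block_id(0), b"first chunk of data")
            # A second block can be read server-side from any readable URL.
            await blob.stage_block_from_url(
                make_block_id(1),
                "https://<account>.blob.core.windows.net/src/other-blob",  # placeholder
                source_offset=0,
                source_length=512,
            )
            # Nothing is visible until the blocks are committed via commit_block_list.

    asyncio.run(stage_blocks())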
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = await self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - @distributed_trace_async - async def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of BlobBlock objects. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.block_blob.commit_block_list(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
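Staged blocks are then made durable with get_block_list/commit_block_list, roughly as follows; ``BlobBlock`` is imported from ``azure.storage.blob``, as the docstring's ``:rtype:`` suggests, and all names are placeholders::

    import asyncio
    import base64

    from azure.storage.blob import BlobBlock
    from azure.storage.blob.aio import BlobServiceClient

    async def commit_staged_blocks():
        async with BlobServiceClient("https://<account>.blob.core.windows.net",
                                     credential="<credential>") as service:
            blob = service.get_blob_client("mycontainer", "staged-blob")
            block_id = base64.b64encode(b"block-000000").decode()
            await blob.stage_block(block_id, b"hello, block list")
            committed, uncommitted = await blob.get_block_list("all")
            # The order of the committed list defines the final blob content.
            await blob.commit_block_list([BlobBlock(block_id=b.id) for b in uncommitted])

    asyncio.run(commit_staged_blocks())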
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to set the tags on. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return await self._client.blob.set_tags(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to retrieve tags from. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags.
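The tagging pair documented above reduces to the following sketch; the tag values are arbitrary examples and the operation requires service API version 2019-12-12 or later::

    import asyncio

    from azure.storage.blob.aio import BlobServiceClient

    async def tag_and_read_back():
        async with BlobServiceClient("https://<account>.blob.core.windows.net",
                                     credential="<credential>") as service:
            blob = service.get_blob_client("mycontainer", "myblob.txt")
            # Each call replaces the entire tag set (at most 10 tags per blob).
            await blob.set_blob_tags({"project": "demo", "state": "raw"})
            tags = await blob.get_blob_tags()
            print(tags)  # {'project': 'demo', 'state': 'raw'}

    asyncio.run(tag_and_read_back())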
- :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = await self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. 
- :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = await self._client.page_blob.get_page_ranges(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. 
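For the page-range queries above, a hedged sketch; the page blob is assumed to exist already, and offsets and lengths must stay 512-byte aligned::

    import asyncio

    from azure.storage.blob.aio import BlobServiceClient

    async def list_page_ranges():
        async with BlobServiceClient("https://<account>.blob.core.windows.net",
                                     credential="<credential>") as service:
            page_blob = service.get_blob_client("mycontainer", "disk.vhd")  # assumed existing page blob
            filled, cleared = await page_blob.get_page_ranges(offset=0, length=4096)
            for page in filled:
                print("filled range:", page["start"], "-", page["end"])

    asyncio.run(list_page_ranges())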
- :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def set_sequence_number( # type: ignore - self, sequence_number_action, # type: Union[str, SequenceNumberAction] - sequence_number=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return await self._client.page_blob.update_sequence_number(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_blob(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. 
Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return await self._client.page_blob.resize(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. 
- :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return await self._client.page_blob.upload_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. 
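resize_blob and upload_page, as documented above, combine roughly like this; an existing page blob and 512-byte alignment are again assumed::

    import asyncio

    from azure.storage.blob.aio import BlobServiceClient

    async def write_first_page():
        async with BlobServiceClient("https://<account>.blob.core.windows.net",
                                     credential="<credential>") as service:
            page_blob = service.get_blob_client("mycontainer", "disk.vhd")  # assumed existing page blob
            await page_blob.resize_blob(4096)  # new size must be a multiple of 512
            await page_blob.upload_page(b"\x00" * 512, offset=0, length=512)

    asyncio.run(write_first_page())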
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. 
versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - - options = self._upload_pages_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def clear_page(self, offset, length, **kwargs): - # type: (int, int, Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
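upload_pages_from_url, documented a little earlier, is usually called as below; the source URL and names are placeholders and the destination page blob is assumed to exist::

    import asyncio

    from azure.storage.blob.aio import BlobServiceClient

    async def copy_pages_server_side():
        async with BlobServiceClient("https://<account>.blob.core.windows.net",
                                     credential="<credential>") as service:
            dest = service.get_blob_client("mycontainer", "disk-copy.vhd")  # assumed existing page blob
            # Pull 4 KiB of pages server-side from a readable source into the same range of the destination.
            await dest.upload_pages_from_url(
                "https://<account>.blob.core.windows.net/src/disk.vhd",  # placeholder source
                offset=0,
                length=4096,
                source_offset=0,
            )

    asyncio.run(copy_pages_server_side())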
- :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return await self._client.page_blob.clear_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return await self._client.append_blob.append_block(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async() - async def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
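The append-blob writers above, in sketch form; an existing append blob is assumed, and the returned dict carries the new append offset and committed block count::

    import asyncio

    from azure.storage.blob.aio import BlobServiceClient

    async def append_log_lines():
        async with BlobServiceClient("https://<account>.blob.core.windows.net",
                                     credential="<credential>") as service:
            log = service.get_blob_client("logs", "app.log")  # assumed existing append blob
            result = await log.append_block(b"2020-01-01T00:00:00Z service started\n")
            # result includes the append offset and committed block count after the write.
            await log.append_block_from_url(
                "https://<account>.blob.core.windows.net/src/extra.log",  # placeholder source
                source_offset=0,
                source_length=1024,
            )

    asyncio.run(append_log_lines())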
- """ - options = self._append_block_from_url_options( - copy_source_url, - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return await self._client.append_blob.append_block_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async() - async def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return await self._client.append_blob.seal(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_service_client_async.py deleted file mode 100644 index ab2e8a0..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_blob_service_client_async.py +++ /dev/null @@ -1,632 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged - -from .._shared.models import LocationMode -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._shared.parser import _to_utc_datetime -from .._shared.response_handlers import parse_to_internal_user_delegation_key -from .._generated import VERSION -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo -from .._blob_service_client import BlobServiceClient as BlobServiceClientBase -from ._container_client_async import ContainerClient -from ._blob_client_async import BlobClient -from .._models import ContainerProperties -from .._deserialize import service_stats_deserialize, service_properties_deserialize -from .._serialize import get_api_version -from ._models import ContainerPropertiesPaged, FilteredBlobPaged - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.pipeline.transport import HttpTransport - from azure.core.pipeline.policies import HTTPPolicy - from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey - from ._lease_async import BlobLeaseClient - from .._models import ( - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. 
If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. 
- - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 12 - :caption: Getting account information for the blob service. - """ - try: - return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_stats(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 12 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 12 - :caption: Getting service properties for the blob service. 
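A rough usage sketch of the read-only service operations documented above (get_account_information, get_service_stats, get_service_properties). This is illustrative only, not part of the removed module: the import path is assumed from the file layout shown in this diff, and the account URL and credential are placeholders::

    # Hedged sketch: assumes the aio package re-exports BlobServiceClient as the
    # file layout in this diff suggests; URL/credential values are placeholders.
    import asyncio

    from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobServiceClient

    async def inspect_service():
        service = BlobServiceClient(
            account_url="https://<account>.blob.core.windows.net",
            credential="<sas-token-or-account-key>")
        # Returns a dict with 'sku_name' and 'account_kind'
        info = await service.get_account_information()
        # Service properties: analytics logging, metrics, CORS rules, etc.
        props = await service.get_service_properties()
        # Replication stats; only available with read-access geo-redundant storage
        stats = await service.get_service_stats()
        print(info, props, stats)

    asyncio.run(inspect_service())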
- """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 12 - :caption: Setting service properties for the blob service. - """ - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> AsyncItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. 
- - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 16 - :caption: Listing the containers in the blob service. - """ - include = ['metadata'] if include_metadata else [] - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace_async - async def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. 
Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 16 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - timeout = kwargs.pop('timeout', None) - kwargs.setdefault('merge_span', True) - await container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace_async - async def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 16 - :caption: Deleting a container in the blob service. 
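A minimal sketch of the container lifecycle calls documented above (create_container, list_containers, delete_container), assuming the same placeholder connection details and import path as before; the container name is arbitrary::

    # Hedged sketch: create_container returns a ContainerClient for the new container,
    # and delete_container accepts a container name or ContainerProperties instance.
    import asyncio

    from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobServiceClient

    async def container_lifecycle():
        service = BlobServiceClient(
            account_url="https://<account>.blob.core.windows.net",
            credential="<credential>")
        container = await service.create_container(
            "mycontainer", metadata={"Category": "test"})
        # list_containers lazily pages through ContainerProperties
        async for props in service.list_containers(name_starts_with="my",
                                                   include_metadata=True):
            print(props.name)
        await service.delete_container("mycontainer")

    asyncio.run(container_lifecycle())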
- """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def _undelete_container(self, deleted_container_name, deleted_container_version, new_name=None, **kwargs): - # type: (str, str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :param str new_name: - The new name for the deleted container to be restored to. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - """ - container = self.get_container_client(new_name or deleted_container_name) - try: - await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except StorageErrorException as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 12 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. 
- :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by - :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 16 - :caption: Getting the blob client to interact with a specific blob. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_container_client_async.py deleted file mode 100644 index 6217e99..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_container_client_async.py +++ /dev/null @@ -1,1115 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, - TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.pipeline.transport import AsyncHttpResponse - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from .._generated import VERSION -from .._generated.aio import AzureBlobStorage -from .._generated.models import ( - StorageErrorException, - SignedIdentifier) -from .._deserialize import deserialize_container_properties -from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version -from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name -from .._lease import get_access_conditions -from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import -from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix -from ._lease_async import BlobLeaseClient -from ._blob_client_async import BlobClient - -if TYPE_CHECKING: - from .._models import PublicAccess - from ._download_async import StorageStreamDownloader - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - AccessPolicy, - StandardBlobTier, - PremiumPageBlobTier) - - -class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. 
Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 12 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(ContainerClient, self).__init__( - account_url, - container_name=container_name, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 16 - :caption: Creating a container to store blobs. 
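A short sketch, under the same placeholder assumptions, of creating a container directly from its ContainerClient and then deleting it under an active lease, per the create_container, acquire_lease and delete_container operations documented here::

    # Hedged sketch: lease_duration=-1 requests an infinite lease; a finite lease
    # must be between 15 and 60 seconds. Import path and credentials are assumed.
    import asyncio

    from azure.multiapi.storagev2.blob.v2019_12_12.aio import ContainerClient

    async def create_and_lease():
        container = ContainerClient(
            account_url="https://<account>.blob.core.windows.net",
            container_name="mycontainer",
            credential="<credential>")
        await container.create_container(metadata={"Category": "test"})
        lease = await container.acquire_lease(lease_duration=-1)
        # With an active lease, deletion succeeds only when the lease is presented
        await container.delete_container(lease=lease)

    asyncio.run(create_and_lease())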
- """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - timeout = kwargs.pop('timeout', None) - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return await self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 16 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - await self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. 
The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_properties(self, **kwargs): - # type: (**Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace_async - async def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 16 - :caption: Getting the access policy on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs # type: Any - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 16 - :caption: Setting access policy on the container. - """ - timeout = kwargs.pop('timeout', None) - lease = kwargs.pop('lease', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - try: - return await self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 12 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged - ) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace_async - async def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 12 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - await blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace_async - async def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword lease: - Required if the blob has an active lease. Value can be a Lease object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
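A small round-trip sketch of the blob-level convenience operations documented in this client (upload_blob, download_blob, delete_blob), using placeholder connection details and an arbitrary blob name; illustrative only::

    # Hedged sketch: upload_blob returns a BlobClient for the uploaded blob, and
    # download_blob returns a StorageStreamDownloader whose readall() is awaited.
    import asyncio

    from azure.multiapi.storagev2.blob.v2019_12_12.aio import ContainerClient

    async def round_trip():
        container = ContainerClient(
            account_url="https://<account>.blob.core.windows.net",
            container_name="mycontainer",
            credential="<credential>")
        await container.upload_blob("hello.txt", b"hello, world", overwrite=True)
        downloader = await container.download_blob("hello.txt")
        data = await downloader.readall()
        assert data == b"hello, world"
        await container.delete_blob("hello.txt")

    asyncio.run(round_trip())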
- :rtype: None - """ - blob = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await blob.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object. (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return await blob_client.download_blob( - offset=offset, - length=length, - **kwargs) - - @distributed_trace_async - async def delete_blobs( # pylint: disable=arguments-differ - self, *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 12 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_standard_blob_tier_blobs( - self, - standard_blob_tier: Union[str, 'StandardBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set on all blobs to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[BlobProperties, str] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 12 - :caption: Get the blob client. 
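
The batch helpers above (``delete_blobs``, ``set_standard_blob_tier_blobs``, ``set_premium_page_blob_tier_blobs``) all funnel into ``_batch_send`` and return an async iterator of per-blob sub-responses. A hedged sketch of a bulk tier change, with placeholder blob names and ``container`` assumed to be an async ``ContainerClient``:

.. code-block:: python

    async def archive_logs(container) -> None:
        # One batch request; sub-responses come back in the same order as the blobs.
        responses = await container.set_standard_blob_tier_blobs(
            "Archive",
            "logs/2020-01.log", "logs/2020-02.log", "logs/2020-03.log",
            raise_on_any_failure=False,  # inspect failures instead of raising
        )
        async for sub_response in responses:
            print(sub_response.status_code)
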
- """ - blob_name = _get_blob_name(blob) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_download_async.py deleted file mode 100644 index c698cb4..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_download_async.py +++ /dev/null @@ -1,491 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from azure.core.exceptions import HttpResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._deserialize import get_page_ranges_result -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += 
length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. - if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = response.properties.etag - - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - try: - chunk = next(self._iter_chunks) - except StopIteration: - raise StopAsyncIteration("Download complete") - self._current_content = await self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the blob. 
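
The ``StorageStreamDownloader`` whose deletion begins here is what ``download_blob`` returns; it can either buffer the whole payload or be consumed chunk by chunk. A minimal sketch, assuming ``blob_client`` is an async ``BlobClient`` and ``handle_chunk`` is a placeholder callback:

.. code-block:: python

    async def read_blob(blob_client) -> bytes:
        downloader = await blob_client.download_blob()
        return await downloader.readall()          # buffer everything in memory

    async def stream_blob(blob_client, handle_chunk) -> None:
        downloader = await blob_client.download_blob(offset=0, length=4 * 1024 * 1024)
        async for chunk in downloader.chunks():    # stream the range chunk by chunk
            handle_chunk(chunk)
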
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = await self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overriden by the user by specifying '*' - if self._request_options.get('modified_access_conditions'): - if not self._request_options['modified_access_conditions'].if_match: - self._request_options['modified_access_conditions'].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - """Iterate over chunks in the download stream. - - :rtype: Iterable[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - async def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. 
- :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. 
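
``readinto`` above schedules chunk downloads with a bounded-concurrency loop: start ``max_concurrency`` tasks, then each time one finishes, start the next. A generic, self-contained sketch of that same scheduling pattern (not the SDK's code, just the idea; errors and results are not collected here):

.. code-block:: python

    import asyncio
    from itertools import islice

    async def run_bounded(coro_factory, work_items, max_concurrency):
        """Run coro_factory(item) for every item, at most max_concurrency at a time."""
        work = iter(work_items)
        running = {asyncio.ensure_future(coro_factory(w))
                   for w in islice(work, max_concurrency)}
        while running:
            # Wake up as soon as any in-flight task completes...
            _done, running = await asyncio.wait(
                running, return_when=asyncio.FIRST_COMPLETED)
            try:
                # ...then top the pool back up with the next work item.
                running.add(asyncio.ensure_future(coro_factory(next(work))))
            except StopIteration:
                break
        if running:
            await asyncio.wait(running)  # drain whatever is still in flight
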
- :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_lease_async.py deleted file mode 100644 index 5f68a9b..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_lease_async.py +++ /dev/null @@ -1,327 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._generated.models import ( - StorageErrorException, - LeaseAccessConditions) -from .._serialize import get_modify_conditions -from .._lease import BlobLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - from .._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(LeaseClientBase): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.aio.BlobClient or - ~azure.storage.blob.aio.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
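
The async lease client deleted here wraps acquire/renew/release/change/break. A hedged usage sketch (the blob client and metadata are placeholders; ``acquire_lease`` defaults to an infinite lease):

.. code-block:: python

    async def update_under_lease(blob_client) -> None:
        lease = await blob_client.acquire_lease()   # lease_duration=-1 -> infinite
        try:
            await blob_client.set_blob_metadata({"state": "locked"}, lease=lease)
            await lease.renew()                     # resets the duration clock
        finally:
            await lease.release()                   # let other clients acquire it
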
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str if_tags_match_condition - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_list_blobs_helper.py deleted file mode 100644 index dc09846..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_list_blobs_helper.py +++ /dev/null @@ -1,162 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged -from .._deserialize import get_blob_properties_from_generated_code -from .._models import BlobProperties -from .._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The container that the blobs are listed from. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
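
``BlobPropertiesPaged`` is the pager behind the container's blob-listing call; callers normally never touch it directly and simply iterate. A minimal sketch, assuming ``container`` is an async ``ContainerClient``:

.. code-block:: python

    async def list_log_blobs(container) -> None:
        # The pager transparently follows continuation tokens across pages.
        async for blob in container.list_blobs(name_starts_with="logs/"):
            print(blob.name, blob.size)
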
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefix(AsyncItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token of the current page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - async def _extract_data_cb(self, get_next_return): - continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_models.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_models.py deleted file mode 100644 index 44d5d63..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_models.py +++ /dev/null @@ -1,141 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator - -from .._models import ContainerProperties, FilteredBlob -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - -from .._generated.models import StorageErrorException -from .._generated.models import FilterBlobItem - - -class ContainerPropertiesPaged(AsyncPageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class FilteredBlobPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - blob = FilteredBlob(name=item.name, container_name=item.container_name, tag_value=item.tag_value) # pylint: disable=protected-access - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2019_12_12/aio/_upload_helpers.py deleted file mode 100644 index 3a495b5..0000000 --- a/azure/multiapi/storagev2/blob/v2019_12_12/aio/_upload_helpers.py +++ /dev/null @@ -1,266 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceModifiedError - -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from .._shared.uploads_async import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from .._shared.encryption import generate_blob_encryption_data, encrypt_blob -from .._generated.models import ( - StorageErrorException, - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) -from .._upload_helpers import _convert_mod_error, _any_conditions - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - - -async def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return await client.upload( - data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = await upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs - ) - else: - block_ids = await upload_substream_blocks( - service=client, - 
uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return await client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. " - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = await client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return await upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs) - - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - 
stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/__init__.py deleted file mode 100644 index 937d74b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/__init__.py +++ /dev/null @@ -1,229 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import os - -from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import -from ._version import VERSION -from ._blob_client import BlobClient -from ._container_client import ContainerClient -from ._blob_service_client import BlobServiceClient -from ._lease import BlobLeaseClient -from ._download import StorageStreamDownloader -from ._quick_query_helper import BlobQueryReader -from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.response_handlers import PartialBatchErrorException -from ._shared.models import( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode, - UserDelegationKey -) -from ._generated.models import ( - RehydratePriority -) -from ._models import ( - BlobType, - BlockState, - StandardBlobTier, - PremiumPageBlobTier, - SequenceNumberAction, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - ContainerProperties, - BlobProperties, - FilteredBlob, - LeaseProperties, - ContentSettings, - CopyProperties, - BlobBlock, - PageRange, - AccessPolicy, - ContainerSasPermissions, - BlobSasPermissions, - CustomerProvidedEncryptionKey, - ContainerEncryptionScope, - BlobQueryError, - DelimitedJsonDialect, - DelimitedTextDialect, - ArrowDialect, - ArrowType, - ObjectReplicationPolicy, - ObjectReplicationRule -) -from ._list_blobs_helper import BlobPrefix - -__version__ = VERSION - - -def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> Dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. 
This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = client.download_blob(**kwargs) - stream.readinto(handle) - - -def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream. - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. 
- :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobType', - 'BlobLeaseClient', - 'StorageErrorCode', - 'UserDelegationKey', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'BlockState', - 'StandardBlobTier', - 'PremiumPageBlobTier', - 'SequenceNumberAction', - 'PublicAccess', - 'BlobAnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'ContainerProperties', - 'BlobProperties', - 'BlobPrefix', - 'FilteredBlob', - 'LeaseProperties', - 'ContentSettings', - 'CopyProperties', - 'BlobBlock', - 'PageRange', - 'AccessPolicy', - 'ContainerSasPermissions', - 'BlobSasPermissions', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageStreamDownloader', - 'CustomerProvidedEncryptionKey', - 'RehydratePriority', - 'generate_account_sas', - 'generate_container_sas', - 'generate_blob_sas', - 'PartialBatchErrorException', - 'ContainerEncryptionScope', - 'BlobQueryError', - 'DelimitedJsonDialect', - 'DelimitedTextDialect', - 'ArrowDialect', - 'ArrowType', - 'BlobQueryReader', - 'ObjectReplicationPolicy', - 'ObjectReplicationRule' -] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_blob_client.py b/azure/multiapi/storagev2/blob/v2020_02_10/_blob_client.py deleted file mode 100644 index 7daace2..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_blob_client.py +++ /dev/null @@ -1,3581 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
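The module-level helpers removed above (``upload_blob_to_url`` and ``download_blob_from_url``) wrap a short-lived ``BlobClient`` for one-shot transfers against a blob URL. A hedged usage sketch; the module path, SAS URL, and file names are placeholders only::

    from azure.multiapi.storagev2.blob.v2021_08_06 import (
        upload_blob_to_url,
        download_blob_from_url,
    )

    sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>"

    # Upload raw bytes as a block blob, replacing any existing content.
    upload_blob_to_url(sas_url, b"hello multiapi", overwrite=True)

    # Download the same blob to a local file; overwrite=True allows clobbering it.
    download_blob_from_url(sas_url, "blob_copy.bin", overwrite=True)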
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines,no-self-use - -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.tracing.decorator import distributed_trace -from azure.core.exceptions import ResourceNotFoundError - -from ._shared import encode_base64 -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.encryption import generate_blob_encryption_data -from ._shared.uploads import IterStreamer -from ._shared.request_handlers import ( - add_metadata_headers, get_length, read_length, - validate_and_format_range_headers) -from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized -from ._generated import AzureBlobStorage, VERSION -from ._generated.models import ( # pylint: disable=unused-import - DeleteSnapshotsOptionType, - BlobHTTPHeaders, - BlockLookupList, - AppendPositionAccessConditions, - SequenceNumberAccessConditions, - StorageErrorException, - QueryRequest, - CpkInfo) -from ._serialize import ( - get_modify_conditions, - get_source_conditions, - get_cpk_scope_info, - get_api_version, - serialize_blob_tags_header, - serialize_blob_tags, - serialize_query_format, get_access_conditions -) -from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags -from ._quick_query_helper import BlobQueryReader -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError -from ._download import StorageStreamDownloader -from ._lease import BlobLeaseClient - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.models import BlockList - from ._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. 
- :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - if not (container_name and blob_name): - raise ValueError("Please specify a container name and blob name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - path_snapshot, sas_token = parse_query(parsed_url.query) - - self.container_name = container_name - self.blob_name = blob_name - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) - super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - quote(self.blob_name, safe='~/'), - self._query_str) - - def _encode_source_url(self, source_url): - parsed_source_url = urlparse(source_url) - source_scheme = parsed_source_url.scheme - source_hostname = parsed_source_url.netloc.rstrip('/') - source_path = unquote(parsed_source_url.path) - source_query = parsed_source_url.query - result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))] - if source_query: - result.append(source_query) - return '?'.join(result) - - @classmethod - def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): - # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient - """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name. - - :param str blob_url: - The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type blob_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. If specified, this will override - the snapshot in the url. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - """ - try: - if not blob_url.lower().startswith('http'): - blob_url = "https://" + blob_url - except AttributeError: - raise ValueError("Blob URL must be a string.") - parsed_url = urlparse(blob_url.rstrip('/')) - - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(blob_url)) - - account_path = "" - if ".core." in parsed_url.netloc: - # .core. is indicating non-customized url. 
Blob name with directory info can also be parsed. - path_blob = parsed_url.path.lstrip('/').split('/', 1) - elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: - path_blob = parsed_url.path.lstrip('/').split('/', 2) - account_path += path_blob[0] - else: - # for customized url. blob name that has directory info cannot be parsed. - path_blob = parsed_url.path.lstrip('/').split('/') - if len(path_blob) > 2: - account_path = "/" + "/".join(path_blob[:-2]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) - if not container_name or not blob_name: - raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.") - - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=path_snapshot, credential=credential, **kwargs - ) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobClient - """Create BlobClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_blob] - :end-before: [END auth_from_connection_string_blob] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=snapshot, credential=credential, **kwargs - ) - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). 
- :rtype: dict(str, str) - """ - try: - return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_blob_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - encryption_options = { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function, - } - if self.key_encryption_key is not None: - cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) - encryption_options['cek'] = cek - encryption_options['vector'] = iv - encryption_options['data'] = encryption_data - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - overwrite = kwargs.pop('overwrite', False) - max_concurrency = kwargs.pop('max_concurrency', 1) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - kwargs['cpk_info'] = cpk_info - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) - kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) - if content_settings: - kwargs['blob_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['overwrite'] = overwrite - kwargs['headers'] = headers - kwargs['validate_content'] = validate_content - kwargs['blob_settings'] = self._config - kwargs['max_concurrency'] = max_concurrency - kwargs['encryption_options'] = encryption_options - if blob_type == BlobType.BlockBlob: - kwargs['client'] = self._client.block_blob - kwargs['data'] = data - elif blob_type == BlobType.PageBlob: - kwargs['client'] = self._client.page_blob - elif blob_type == BlobType.AppendBlob: - if self.require_encryption or (self.key_encryption_key is not None): - raise 
ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - kwargs['client'] = self._client.append_blob - else: - raise ValueError("Unsupported BlobType: {}".format(blob_type)) - return kwargs - - @distributed_trace - def upload_blob( # pylint: disable=too-many-locals - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 12 - :caption: Upload a blob to the container. 
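A short usage sketch for the ``upload_blob`` call documented above, assuming a placeholder connection string, an existing container, and one of the retained module versions (all names below are illustrative, not part of this change)::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient, ContentSettings

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="report.csv")

    with open("report.csv", "rb") as data:
        # Block blob upload with an explicit content type; overwrite=True replaces
        # any existing blob instead of raising ResourceExistsError.
        blob.upload_blob(
            data,
            overwrite=True,
            content_settings=ContentSettings(content_type="text/csv"),
            metadata={"source": "nightly-job"},
        )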
- """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return upload_page_blob(**options) - return upload_append_blob(**options) - - def _download_blob_options(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Service actually uses an end-range inclusive index - - validate_content = kwargs.pop('validate_content', False) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'clients': self._client, - 'config': self._config, - 'start_range': offset, - 'end_range': length, - 'version_id': kwargs.pop('version_id', None), - 'validate_content': validate_content, - 'encryption_options': { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function}, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': kwargs.pop('cls', None) or deserialize_blob_stream, - 'max_concurrency':kwargs.pop('max_concurrency', 1), - 'encoding': kwargs.pop('encoding', None), - 'timeout': kwargs.pop('timeout', None), - 'name': self.blob_name, - 'container': self.container_name} - options.update(kwargs) - return options - - @distributed_trace - def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 12 - :caption: Download a blob. 
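The ``download_blob`` method above returns a ``StorageStreamDownloader`` rather than raw bytes, so the caller chooses between streaming and buffering. A minimal sketch with placeholder module path and names::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="report.csv")

    # readinto() streams the content into an open handle chunk by chunk.
    with open("report_copy.csv", "wb") as handle:
        blob.download_blob().readinto(handle)

    # readall() buffers everything and returns bytes; offset/length restrict the range.
    header_bytes = blob.download_blob(offset=0, length=64).readall()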
- """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - return StorageStreamDownloader(**options) - - def _quick_query_options(self, query_expression, - **kwargs): - # type: (str, **Any) -> Dict[str, Any] - delimiter = '\n' - input_format = kwargs.pop('blob_format', None) - if input_format: - try: - delimiter = input_format.lineterminator - except AttributeError: - try: - delimiter = input_format.delimiter - except AttributeError: - raise ValueError("The Type of blob_format can only be DelimitedTextDialect or DelimitedJsonDialect") - output_format = kwargs.pop('output_format', None) - if output_format: - try: - delimiter = output_format.lineterminator - except AttributeError: - try: - delimiter = output_format.delimiter - except AttributeError: - pass - else: - output_format = input_format - query_request = QueryRequest( - expression=query_expression, - input_serialization=serialize_query_format(input_format), - output_serialization=serialize_query_format(output_format) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo( - encryption_key=cpk.key_value, - encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm - ) - options = { - 'query_request': query_request, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'snapshot': self.snapshot, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized, - } - options.update(kwargs) - return options, delimiter - - @distributed_trace - def query_blob(self, query_expression, **kwargs): - # type: (str, **Any) -> BlobQueryReader - """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions. - This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - :keyword Callable[~azure.storage.blob.BlobQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword blob_format: - Optional. Defines the serialization of the data currently stored in the blob. The default is to - treat the blob data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. - :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the blob. By providing an output format, the blob data will be reformatted - according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. - :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect, ~azure.storage.blob.DelimitedJsonDialect - or list[~azure.storage.blob.ArrowDialect] - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (BlobQueryReader) - :rtype: ~azure.storage.blob.BlobQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on blob/or blob snapshot data by providing simple query expressions. 
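To illustrate the ``query_blob`` operation described above, a hedged sketch that treats the blob as header-bearing CSV and projects two columns; the module path, connection string, container, and column names are assumptions for illustration::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient, DelimitedTextDialect

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="report.csv")

    # Describe the stored format so the service can parse rows before filtering.
    input_format = DelimitedTextDialect(delimiter=",", quotechar='"', has_header=True)

    reader = blob.query_blob(
        "SELECT Name, Total FROM BlobStorage", blob_format=input_format)
    print(reader.readall().decode("utf-8"))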
- """ - errors = kwargs.pop("on_error", None) - error_cls = kwargs.pop("error_cls", BlobQueryError) - encoding = kwargs.pop("encoding", None) - options, delimiter = self._quick_query_options(query_expression, **kwargs) - try: - headers, raw_response_body = self._client.blob.query(**options) - except StorageErrorException as error: - process_storage_error(error) - return BlobQueryReader( - name=self.blob_name, - container=self.container_name, - errors=errors, - record_delimiter=delimiter, - encoding=encoding, - headers=headers, - response=raw_response_body, - error_cls=error_cls) - - @staticmethod - def _generic_delete_blob_options(delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if delete_snapshots: - delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) - options = { - 'timeout': kwargs.pop('timeout', None), - 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs - 'delete_snapshots': delete_snapshots or None, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions} - options.update(kwargs) - return options - - def _delete_blob_options(self, delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - if self.snapshot and delete_snapshots: - raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") - options = self._generic_delete_blob_options(delete_snapshots, **kwargs) - options['snapshot'] = self.snapshot - options['version_id'] = kwargs.pop('version_id', None) - return options - - @distributed_trace - def delete_blob(self, delete_snapshots=False, **kwargs): - # type: (str, **Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 12 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - self._client.blob.delete(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def undelete_blob(self, **kwargs): - # type: (**Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 8 - :caption: Undeleting a blob. - """ - try: - self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace() - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :param str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :param int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def get_blob_properties(self, **kwargs): - # type: (**Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. 
versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a blob. 
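# Illustrative usage sketch (editorial addition). A minimal flow over the
# operations documented above -- exists(), get_blob_properties(), delete_blob()
# and undelete_blob() -- assuming the upstream-equivalent azure.storage.blob
# import path and hypothetical connection string / container / blob names.
from azure.storage.blob import BlobClient

blob_client = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="report.txt")

if blob_client.exists():
    props = blob_client.get_blob_properties()
    print(props.size, props.last_modified, props.etag)
    # Soft-delete the blob together with its snapshots, then restore it.
    # undelete_blob() only succeeds while the delete retention window is open.
    blob_client.delete_blob(delete_snapshots="include")
    blob_client.undelete_blob()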
- """ - # TODO: extract this out as _get_blob_properties_options - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - blob_props = self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - def _set_http_headers_options(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - options = { - 'timeout': kwargs.pop('timeout', None), - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return self._client.blob.set_http_headers(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _set_blob_metadata_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
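# Illustrative usage sketch (editorial addition) for the two setters documented
# here: set_http_headers() overwrites all content settings at once, and
# set_blob_metadata() replaces the full metadata dictionary on each call.
# Import path and names are assumptions (upstream-equivalent azure.storage.blob).
from azure.storage.blob import BlobClient, ContentSettings

blob_client = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="page.html")

blob_client.set_http_headers(content_settings=ContentSettings(
    content_type="text/html", cache_control="max-age=3600"))
blob_client.set_blob_metadata(metadata={"owner": "docs-team", "reviewed": "true"})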
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return self._client.blob.set_metadata(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_page_blob_options( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - sequence_number = kwargs.pop('sequence_number', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - if premium_page_blob_tier: - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore - - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_content_length': size, - 'blob_sequence_number': sequence_number, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 
'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
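# Illustrative usage sketch (editorial addition). create_page_blob() allocates a
# fixed-size page blob whose size must be 512-byte aligned. Import path and
# names are assumptions (upstream-equivalent azure.storage.blob package).
from azure.storage.blob import BlobClient

page_client = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="disk.vhd")

# 1 MiB page blob, tagged at creation time (tags require API version 2019-12-12+).
page_client.create_page_blob(1024 * 1024, tags={"purpose": "demo"})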
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return self._client.page_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.append_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _create_snapshot_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
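# Illustrative usage sketch (editorial addition) covering create_append_blob()
# and create_snapshot() as documented above; the dict returned by
# create_snapshot() carries the snapshot timestamp under the 'snapshot' key.
# Import path and names are assumptions (upstream-equivalent azure.storage.blob).
from azure.storage.blob import BlobClient

append_client = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="audit.log")

append_client.create_append_blob(metadata={"source": "demo"})
snapshot_info = append_client.create_snapshot()
print(snapshot_info["snapshot"])  # point-in-time id of the read-only snapshot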
- Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 8 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return self._client.blob.create_snapshot(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - if 'source_lease' in kwargs: - source_lease = kwargs.pop('source_lease') - try: - headers['x-ms-source-lease-id'] = source_lease.id # type: str - except AttributeError: - headers['x-ms-source-lease-id'] = source_lease - - tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) - - if kwargs.get('requires_sync'): - headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync')) - - timeout = kwargs.pop('timeout', None) - dest_mod_conditions = get_modify_conditions(kwargs) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'copy_source': source_url, - 'seal_blob': kwargs.pop('seal_destination_blob', None), - 'timeout': timeout, - 'modified_access_conditions': dest_mod_conditions, - 'blob_tags_string': blob_tags_string, - 'headers': headers, - 'cls': return_response_headers, - } - if not incremental_copy: - source_mod_conditions = get_source_conditions(kwargs) - dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) - options['source_modified_access_conditions'] = source_mod_conditions - options['lease_access_conditions'] = dest_access_conditions - options['tier'] = tier.value if tier else None - options.update(kwargs) - return options - - @distributed_trace - def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. 
The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). 
- :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Copy a blob from a URL. - """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return self._client.page_blob.copy_incremental(**options) - return self._client.blob.start_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _abort_copy_options(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - options = { - 'copy_id': copy_id, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID string, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - self._client.blob.abort_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
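# Illustrative usage sketch (editorial addition). start_copy_from_url() is
# asynchronous on the service side: it returns copy properties immediately, and
# a still-pending copy can be cancelled with abort_copy(). The import path, the
# source URL and the blob names are assumptions (upstream azure.storage.blob).
from azure.storage.blob import BlobClient

dest_client = BlobClient.from_connection_string(
    "<connection-string>", container_name="backups", blob_name="copy-of-myblob")

copy_props = dest_client.start_copy_from_url(
    "https://myaccount.blob.core.windows.net/mycontainer/myblob")
if copy_props["copy_status"] == "pending":
    dest_client.abort_copy(copy_props["copy_id"])  # leaves a zero-length blob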
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace - def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
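# Illustrative usage sketch (editorial addition). acquire_lease() returns a
# BlobLeaseClient that can be passed as the `lease` keyword to other operations
# such as set_standard_blob_tier(). Import path and names are assumptions
# (upstream-equivalent azure.storage.blob package).
from azure.storage.blob import BlobClient, StandardBlobTier

blob_client = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="archive.bin")

lease = blob_client.acquire_lease()  # infinite lease by default (duration -1)
blob_client.set_standard_blob_tier(StandardBlobTier.Cool, lease=lease)
lease.release()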
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - if self.snapshot and kwargs.get('version_id'): - raise ValueError("Snapshot and version_id cannot be set at the same time") - try: - self._client.blob.set_tier( - tier=standard_blob_tier, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def _stage_block_options( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_id = encode_base64(str(block_id)) - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'block_id': block_id, - 'content_length': length, - 'body': data, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. 
Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return self._client.block_blob.stage_block(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _stage_block_from_url_options( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if source_length is not None and source_offset is None: - raise ValueError("Source offset value must not be None if length is set.") - if source_length is not None: - source_length = source_offset + source_length - 1 - block_id = encode_base64(str(block_id)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - range_header = None - if source_offset is not None: - range_header, _ = validate_and_format_range_headers(source_offset, source_length) - - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'block_id': block_id, - 'content_length': 0, - 'source_url': source_url, - 'source_range': range_header, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param str source_url: The URL. 
- :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_from_url_options( - block_id, - source_url=self._encode_source_url(source_url), - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return self._client.block_blob.stage_block_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _get_block_list_result(self, blocks): - # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] - committed = [] # type: List - uncommitted = [] # type: List - if blocks.committed_blocks: - committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access - if blocks.uncommitted_blocks: - uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access - return committed, uncommitted - - @distributed_trace - def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
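# Illustrative usage sketch (editorial addition). Blocks are staged first and
# only become part of the blob once committed via commit_block_list (shown
# further below). Import path, names and the source URL are assumptions
# (upstream-equivalent azure.storage.blob package).
from azure.storage.blob import BlobClient

block_client = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="assembled.bin")

block_client.stage_block(block_id="block-0001", data=b"first chunk of data")
block_client.stage_block_from_url(
    block_id="block-0002",
    source_url="https://myaccount.blob.core.windows.net/mycontainer/source-blob",
    source_offset=0, source_length=512)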
- :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - def _commit_block_list_options( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - for block in block_list: - try: - if block.state.value == 'committed': - block_lookup.committed.append(encode_base64(str(block.id))) - elif block.state.value == 'uncommitted': - block_lookup.uncommitted.append(encode_base64(str(block.id))) - else: - block_lookup.latest.append(encode_base64(str(block.id))) - except AttributeError: - block_lookup.latest.append(encode_base64(str(block))) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - blob_headers = None - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'blocks': block_lookup, - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'tier': tier.value if tier else None, - 'blob_tags_string': blob_tags_string, - 'headers': headers - } - options.update(kwargs) - return options - - @distributed_trace - def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) 
-> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of Blockblobs. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.block_blob.commit_block_list(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def _set_blob_tags_options(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - tags = serialize_blob_tags(tags) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'tags': tags, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
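A hedged sketch of committing staged blocks with `commit_block_list`, assuming the `BlobBlock` and `ContentSettings` models are exported from the same versioned namespace as `BlobClient` (as they are in the module deleted here); the `stage_block` calls refer to the method defined earlier in this file.

.. code-block:: python

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobBlock, ContentSettings

    # Stage two blocks, then commit them in order to form the blob's content.
    block_blob.stage_block(block_id="block-001", data=b"hello, ")
    block_blob.stage_block(block_id="block-002", data=b"world")
    block_blob.commit_block_list(
        [BlobBlock(block_id="block-001"), BlobBlock(block_id="block-002")],
        content_settings=ContentSettings(content_type="text/plain"),
        metadata={"origin": "example"},
    )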
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return self._client.blob.set_tags(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _get_blob_tags_options(self, **kwargs): - # type: (**Any) -> Dict[str, str] - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'version_id': kwargs.pop('version_id', None), - 'snapshot': self.snapshot, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized} - return options - - @distributed_trace - def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except StorageErrorException as error: - process_storage_error(error) - - def _get_page_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) 
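Tags round-trip as a plain dict, per the `set_blob_tags` and `get_blob_tags` docstrings; a small sketch, keeping in mind the docstrings' note that these operations need a service API version of at least '2019-12-12':

.. code-block:: python

    # Replace the blob's entire tag set, then read it back.
    block_blob.set_blob_tags({"project": "demo", "state": "processed"})
    print(block_blob.get_blob_tags())   # -> {'project': 'demo', 'state': 'processed'}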
-> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Reformat to an inclusive range index - page_range, _ = validate_and_format_range_headers( - offset, length, start_range_required=False, end_range_required=False, align_to_page=True - ) - options = { - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': page_range} - if previous_snapshot_diff: - try: - options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore - except AttributeError: - try: - options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore - except TypeError: - options['prevsnapshot'] = previous_snapshot_diff - options.update(kwargs) - return options - - @distributed_trace - def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
- :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = self._client.page_blob.get_page_ranges(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace - def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
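Switching to the page blob operations: a hedged sketch of `get_page_ranges`, assuming a `BlobClient` pointed at a page blob, with the same import and placeholder conventions as the earlier sketches; `create_page_blob` refers to the method defined earlier in this module.

.. code-block:: python

    page_blob = BlobClient(
        "https://<account>.blob.core.windows.net",
        "<container>",
        "<page-blob>",
        credential="<account-key-or-sas>",
    )
    page_blob.create_page_blob(size=4 * 1024 * 1024)   # size must be 512-byte aligned

    # Valid (filled) and cleared ranges come back as dicts with 'start'/'end' keys.
    filled, cleared = page_blob.get_page_ranges(offset=0, length=4 * 1024 * 1024)
    for page_range in filled:
        print(page_range["start"], page_range["end"])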
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if sequence_number_action is None: - raise ValueError("A sequence number action must be specified") - options = { - 'sequence_number_action': sequence_number_action, - 'timeout': kwargs.pop('timeout', None), - 'blob_sequence_number': sequence_number, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return self._client.page_blob.update_sequence_number(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _resize_blob_options(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if size is None: - raise ValueError("A content length must be specified for a Page Blob.") - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'blob_content_length': size, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def resize_blob(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
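A short sketch of `set_sequence_number`, reusing the `page_blob` client above; per the docstring the action may be passed as a string or a `SequenceNumberAction` value, and the sequence number itself is user-controlled (the 'update' literal below is an assumption about the accepted string form):

.. code-block:: python

    # Pin the blob's sequence number to an explicit value.
    props = page_blob.set_sequence_number("update", sequence_number="7")
    print(props["etag"], props["last_modified"])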
- :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return self._client.page_blob.resize(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_page_options( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - if isinstance(page, six.text_type): - page = page.encode(kwargs.pop('encoding', 'UTF-8')) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': page[:length], - 'content_length': length, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. 
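`resize_blob` takes the new size in bytes, which must be 512-byte aligned; shrinking clears any pages above the new size, as noted above. Reusing the same `page_blob` client:

.. code-block:: python

    # Grow the page blob to 8 MiB; pages beyond the old size read back as zeros.
    page_blob.resize_blob(8 * 1024 * 1024)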
This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return self._client.page_blob.upload_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _upload_pages_from_url_options( # type: ignore - self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) 
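A sketch of writing one aligned range with `upload_page`; both the offset and the length must be multiples of 512, matching the validation in `_upload_page_options` above.

.. code-block:: python

    data = b"x" * 4096   # payload length must be a multiple of 512
    page_blob.upload_page(data, offset=0, length=len(data))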
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # TODO: extract the code to a method format_range - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - if source_offset is None or offset % 512 != 0: - raise ValueError("source_offset must be an integer that aligns with 512 page size") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) # should subtract 1 here? - - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - source_content_md5 = kwargs.pop('source_content_md5', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). 
- :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _clear_page_options(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def clear_page(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. 
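A hedged sketch of `upload_pages_from_url`, assuming the source page blob is readable through a SAS URL (placeholder values throughout); destination and source ranges must both be 512-byte aligned.

.. code-block:: python

    src = "https://<src-account>.blob.core.windows.net/<container>/<src-page-blob>?<sas>"

    # Copy 4096 bytes from the start of the source into the start of this blob.
    page_blob.upload_pages_from_url(src, offset=0, length=4096, source_offset=0)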
- :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return self._client.page_blob.clear_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _append_block_options( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
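`clear_page` zeroes an aligned range that was previously written:

.. code-block:: python

    # Clear the 4096 bytes written in the upload_page sketch above.
    page_blob.clear_page(offset=0, length=4096)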
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if length == 0: - return {} - if isinstance(data, bytes): - data = data[:length] - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - validate_content = kwargs.pop('validate_content', False) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': data, - 'content_length': length, - 'timeout': kwargs.pop('timeout', None), - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return self._client.append_blob.append_block(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _append_block_from_url_options( # type: ignore - self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
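For the append blob operations, a hedged sketch of `append_block` with the same placeholder conventions as above; `create_append_blob` refers to the method defined earlier in this module and is called first so that blocks can be appended.

.. code-block:: python

    append_blob = BlobClient(
        "https://<account>.blob.core.windows.net",
        "<container>",
        "<append-blob>",
        credential="<account-key-or-sas>",
    )
    append_blob.create_append_blob()

    # Each append_block call adds a new block to the end of the blob.
    append_blob.append_block(b"first line\n")
    append_blob.append_block("second line\n", encoding="utf-8")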
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # If end range is provided, start range must be provided - if source_length is not None and source_offset is None: - raise ValueError("source_offset should also be specified if source_length is specified") - # Format based on whether length is present - source_range = None - if source_length is not None: - end_range = source_offset + source_length - 1 - source_range = 'bytes={0}-{1}'.format(source_offset, end_range) - elif source_offset is not None: - source_range = "bytes={0}-".format(source_offset) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - source_content_md5 = kwargs.pop('source_content_md5', None) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': copy_source_url, - 'content_length': 0, - 'source_range': source_range, - 'source_content_md5': source_content_md5, - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). 
- :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return self._client.append_blob.append_block_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _seal_append_blob_options(self, **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - append_conditions = None - if appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
- :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return self._client.append_blob.seal(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2020_02_10/_blob_service_client.py deleted file mode 100644 index f68a680..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_blob_service_client.py +++ /dev/null @@ -1,686 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._shared.models import LocationMode -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.parser import _to_utc_datetime -from ._shared.response_handlers import return_response_headers, process_storage_error, \ - parse_to_internal_user_delegation_key -from ._generated import AzureBlobStorage, VERSION -from ._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo -from ._container_client import ContainerClient -from ._blob_client import BlobClient -from ._models import ContainerPropertiesPaged, FilteredBlobPaged -from ._serialize import get_api_version -from ._deserialize import service_stats_deserialize, service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.pipeline.transport import HttpTransport - from azure.core.pipeline.policies import HTTPPolicy - from ._shared.models import UserDelegationKey - from ._lease import BlobLeaseClient - from ._models import ( - ContainerProperties, - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(StorageAccountHostsMixin): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. 
- Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self._query_str, credential = self._format_query_string(sas_token, credential) - super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobServiceClient - """Create BlobServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. 
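A short construction sketch for the two entry points just described (account URL and credential are placeholders; ``DefaultAzureCredential`` assumes the separate azure-identity package is installed)::

    from azure.identity import DefaultAzureCredential
    from azure.storage.blob import BlobServiceClient

    # From the account URL plus an Azure AD token credential ...
    service = BlobServiceClient("https://<account>.blob.core.windows.net",
                                credential=DefaultAzureCredential())

    # ... or from a connection string carrying the account key or a SAS token.
    service = BlobServiceClient.from_connection_string("<storage-connection-string>")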
The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob service client. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string] - :end-before: [END auth_from_connection_string] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 8 - :caption: Getting account information for the blob service. - """ - try: - return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_service_stats(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. 
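A sketch of the user-delegation-key flow described above, combined with SAS generation (``generate_blob_sas`` and ``BlobSasPermissions`` come from the same public package; all names are placeholders)::

    from datetime import datetime, timedelta
    from azure.identity import DefaultAzureCredential
    from azure.storage.blob import (BlobSasPermissions, BlobServiceClient,
                                    generate_blob_sas)

    service = BlobServiceClient("https://<account>.blob.core.windows.net",
                                credential=DefaultAzureCredential())

    # Requires a token credential; the key is only valid inside the window.
    now = datetime.utcnow()
    key = service.get_user_delegation_key(now, now + timedelta(hours=1))

    sas = generate_blob_sas(account_name="<account>", container_name="logs",
                            blob_name="combined.log", user_delegation_key=key,
                            permission=BlobSasPermissions(read=True),
                            expiry=now + timedelta(hours=1))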
- The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 8 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 8 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. 
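To make the replication behaviour above concrete, a small read-only sketch (placeholder connection string; ``get_service_stats`` only succeeds on accounts with read-access geo-redundant replication)::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<storage-connection-string>")

    # Served from the secondary endpoint.
    stats = service.get_service_stats()
    print(stats["geo_replication"]["status"], stats["geo_replication"]["last_sync_time"])

    props = service.get_service_properties()
    print(props["delete_retention_policy"].enabled)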
If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 8 - :caption: Setting service properties for the blob service. - """ - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 12 - :caption: Listing the containers in the blob service. 
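A sketch combining the two service-level operations above (placeholder connection string; elements left as None keep their existing settings)::

    from azure.storage.blob import BlobServiceClient, RetentionPolicy

    service = BlobServiceClient.from_connection_string("<storage-connection-string>")

    # Only the delete retention policy changes; logging, metrics, CORS, etc.
    # are left as None and therefore preserved.
    service.set_service_properties(
        delete_retention_policy=RetentionPolicy(enabled=True, days=7))

    # Lazily pages through containers, following continuation tokens.
    for container in service.list_containers(name_starts_with="logs-",
                                             include_metadata=True):
        print(container.name, container.metadata)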
- """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> ItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace - def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 12 - :caption: Creating a container in the blob service. 
- """ - container = self.get_container_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace - def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 12 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace - def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword str new_name: - The new name for the deleted container to be restored to. 
- If not specified deleted_container_name will be used as the restored container name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - container = self.get_container_client(new_name or deleted_container_name) - try: - container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except StorageErrorException as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 8 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 12 - :caption: Getting the blob client to interact with a specific blob. 
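A short sketch of the client-factory pattern described above (placeholder names; neither call makes a network request and the entities need not exist yet)::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<storage-connection-string>")

    # Both clients reuse the parent's pipeline, hosts and credential.
    container = service.get_container_client("reports")
    blob = service.get_blob_client("reports", "2024/summary.csv")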
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_container_client.py b/azure/multiapi/storagev2/blob/v2020_02_10/_container_client.py deleted file mode 100644 index ba327c2..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_container_client.py +++ /dev/null @@ -1,1448 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core import MatchConditions -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import HttpRequest - -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from ._generated import AzureBlobStorage, VERSION -from ._generated.models import ( - StorageErrorException, - SignedIdentifier) -from ._deserialize import deserialize_container_properties -from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobType) -from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged -from ._lease import BlobLeaseClient -from ._blob_client import BlobClient - -if TYPE_CHECKING: - from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports - from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports - from datetime import datetime - from ._models import ( # pylint: disable=unused-import - PublicAccess, - AccessPolicy, - ContentSettings, - StandardBlobTier, - PremiumPageBlobTier) - - -def _get_blob_name(blob): - """Return the blob name. 
- - :param blob: A blob string or BlobProperties - :rtype: str - """ - try: - return blob.get('name') - except AttributeError: - return blob - - -class ContainerClient(StorageAccountHostsMixin): - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 8 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not container_name: - raise ValueError("Please specify a container name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self.container_name = container_name - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - self._query_str) - - @classmethod - def from_container_url(cls, container_url, credential=None, **kwargs): - # type: (str, Optional[Any], Any) -> ContainerClient - """Create ContainerClient from a container url. - - :param str container_url: - The full endpoint URL to the Container, including SAS token if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type container_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - """ - try: - if not container_url.lower().startswith('http'): - container_url = "https://" + container_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(container_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(container_url)) - - container_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(container_path) > 1: - account_path = "/" + "/".join(container_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name = unquote(container_path[-1]) - if not container_name: - raise ValueError("Invalid URL. Please provide a URL with a valid container name") - return cls(account_url, container_name=container_name, credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ContainerClient - """Create ContainerClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: - The container name for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. 
The value can be a SAS token string, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_container] - :end-before: [END auth_from_connection_string_container] - :language: python - :dedent: 8 - :caption: Creating the ContainerClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, credential=credential, **kwargs) - - @distributed_trace - def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 12 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - timeout = kwargs.pop('timeout', None) - headers.update(add_metadata_headers(metadata)) # type: ignore - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
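A construction sketch for the ContainerClient entry points above (URL, SAS token and connection string are placeholders)::

    from azure.storage.blob import ContainerClient

    # Directly from a container URL, here carrying a SAS token ...
    container = ContainerClient.from_container_url(
        "https://<account>.blob.core.windows.net/reports?<sas>")

    # ... or from a connection string plus the container name.
    container = ContainerClient.from_connection_string(
        "<storage-connection-string>", "reports")

    container.create_container(metadata={"owner": "data-team"})
    container.delete_container()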
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 12 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_container_properties(self, **kwargs): - # type: (Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace - def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
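A sketch tying together the lease and metadata operations above (placeholder names; note that ``set_container_metadata`` replaces, rather than merges, existing metadata)::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string(
        "<storage-connection-string>", "reports")

    # Infinite lease (duration -1); other writers are blocked until release.
    lease = container.acquire_lease()
    try:
        container.set_container_metadata({"owner": "data-team"}, lease=lease)
        props = container.get_container_properties(lease=lease)
        print(props.metadata)
    finally:
        lease.release()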
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the container. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 12 - :caption: Getting the access policy on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. 
The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 12 - :caption: Setting access policy on the container. - """ - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - lease = kwargs.pop('lease', None) - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. 
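A sketch of stored access policies and flat listing as described above (placeholder names; at most five policies per container, and an empty dict clears them)::

    from datetime import datetime, timedelta
    from azure.storage.blob import (AccessPolicy, ContainerClient,
                                    ContainerSasPermissions)

    container = ContainerClient.from_connection_string(
        "<storage-connection-string>", "reports")

    policy = AccessPolicy(permission=ContainerSasPermissions(read=True, list=True),
                          expiry=datetime.utcnow() + timedelta(days=1))
    container.set_container_access_policy({"read-only": policy}, public_access=None)

    for blob in container.list_blobs(name_starts_with="2024/", include=["metadata"]):
        print(blob.name, blob.size)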
- Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 8 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. - :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace - def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. 
This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. 
This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 8 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace - def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. 
- - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - blob_client.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace - def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return blob_client.download_blob(offset=offset, length=length, **kwargs) - - def _generate_delete_blobs_subrequest_options( - self, snapshot=None, - delete_snapshots=None, - lease_access_conditions=None, - modified_access_conditions=None, - **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. 
- """ - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct parameters - timeout = kwargs.pop('timeout', None) - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access - "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access - "lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_delete_blobs_options(self, - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - delete_snapshots = kwargs.pop('delete_snapshots', None) - if_modified_since = kwargs.pop('if_modified_since', None) - if_unmodified_since = kwargs.pop('if_unmodified_since', None) - if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "" - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - snapshot=blob.get('snapshot'), - delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), - lease=blob.get('lease_id'), - 
if_modified_since=if_modified_since or blob.get('if_modified_since'), - if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), - etag=blob.get('etag'), - if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), - match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') - else None, - timeout=blob.get('timeout'), - ) - except AttributeError: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - delete_snapshots=delete_snapshots, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_tags_match_condition=if_tags_match_condition - ) - - query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) - - req = HttpRequest( - "DELETE", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def delete_blobs(self, *blobs, **kwargs): - # type: (...) -> Iterator[HttpResponse] - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 8 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def _generate_set_tiers_subrequest_options( - self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. - """ - if not tier: - raise ValueError("A blob tier must be specified") - if snapshot and version_id: - raise ValueError("Snapshot and version_id cannot be set at the same time") - if_tags = kwargs.pop('if_tags', None) - - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - timeout = kwargs.pop('timeout', None) - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if version_id is not None: - query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access - "rehydrate_priority", rehydrate_priority, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_set_tiers_options(self, - blob_tier, # type: Optional[Union[str, StandardBlobTier, 
PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - rehydrate_priority = kwargs.pop('rehydrate_priority', None) - if_tags = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "" - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - tier = blob_tier or blob.get('blob_tier') - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - tier=tier, - snapshot=blob.get('snapshot'), - version_id=blob.get('version_id'), - rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), - lease_access_conditions=blob.get('lease_id'), - if_tags=if_tags or blob.get('if_tags_match_condition'), - timeout=timeout or blob.get('timeout') - ) - except AttributeError: - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) - - req = HttpRequest( - "PUT", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def set_standard_blob_tier_blobs( - self, - standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - snapshot: - key: "snapshost", value type: str - version id: - key: "version_id", value type: str - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - @distributed_trace - def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. 
- :return: An iterator of responses, one for each blob in order - :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[str, BlobProperties] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 8 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_deserialize.py b/azure/multiapi/storagev2/blob/v2020_02_10/_deserialize.py deleted file mode 100644 index 159e0e6..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_deserialize.py +++ /dev/null @@ -1,158 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties -from ._shared.models import get_enum_value - -from ._shared.response_handlers import deserialize_metadata -from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ - StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule - -if TYPE_CHECKING: - from ._generated.models import PageList - - -def deserialize_blob_properties(response, obj, headers): - blob_properties = BlobProperties( - metadata=deserialize_metadata(response, obj, headers), - object_replication_source_properties=deserialize_ors_policies(response.headers), - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - blob_properties.content_settings.content_md5 = None - return blob_properties - - -def deserialize_ors_policies(policy_dictionary): - - if policy_dictionary is None: - return None - # For source blobs (blobs that have policy ids and rule ids applied to them), - # the header will be formatted as "x-ms-or-_: {Complete, Failed}". - # The value of this header is the status of the replication. - or_policy_status_headers = {key: val for key, val in policy_dictionary.items() - if 'or-' in key and key != 'x-ms-or-policy-id'} - - parsed_result = {} - - for key, val in or_policy_status_headers.items(): - # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule - policy_and_rule_ids = key.split('or-')[1].split('_') - policy_id = policy_and_rule_ids[0] - rule_id = policy_and_rule_ids[1] - - # If we are seeing this policy for the first time, create a new list to store rule_id -> result - parsed_result[policy_id] = parsed_result.get(policy_id) or list() - parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val)) - - result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()] - - return result_list - - -def deserialize_blob_stream(response, obj, headers): - blob_properties = deserialize_blob_properties(response, obj, headers) - obj.properties = blob_properties - return response.location_mode, obj - - -def deserialize_container_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - container_properties = ContainerProperties( - metadata=metadata, - **headers - ) - return container_properties - - -def get_page_ranges_result(ranges): - # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - page_range = [] # type: ignore - clear_range = [] # type: List - if ranges.page_range: - page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore - if ranges.clear_range: - clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range] - return page_range, clear_range # type: ignore - - -def service_stats_deserialize(generated): - """Deserialize a ServiceStats objects into a dict. - """ - return { - 'geo_replication': { - 'status': generated.geo_replication.status, - 'last_sync_time': generated.geo_replication.last_sync_time, - } - } - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. 
- """ - return { - 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'target_version': generated.default_service_version, # pylint: disable=protected-access - 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access - 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access - } - - -def get_blob_properties_from_generated_code(generated): - blob = BlobProperties() - blob.name = generated.name - blob_type = get_enum_value(generated.properties.blob_type) - blob.blob_type = BlobType(blob_type) if blob_type else None - blob.etag = generated.properties.etag - blob.deleted = generated.deleted - blob.snapshot = generated.snapshot - blob.is_append_blob_sealed = generated.properties.is_sealed - blob.metadata = generated.metadata.additional_properties if generated.metadata else {} - blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None - blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access - blob.last_modified = generated.properties.last_modified - blob.creation_time = generated.properties.creation_time - blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access - blob.size = generated.properties.content_length - blob.page_blob_sequence_number = generated.properties.blob_sequence_number - blob.server_encrypted = generated.properties.server_encrypted - blob.encryption_scope = generated.properties.encryption_scope - blob.deleted_time = generated.properties.deleted_time - blob.remaining_retention_days = generated.properties.remaining_retention_days - blob.blob_tier = generated.properties.access_tier - blob.rehydrate_priority = generated.properties.rehydrate_priority - blob.blob_tier_inferred = generated.properties.access_tier_inferred - blob.archive_status = generated.properties.archive_status - blob.blob_tier_change_time = generated.properties.access_tier_change_time - blob.version_id = generated.version_id - blob.is_current_version = generated.is_current_version - blob.tag_count = generated.properties.tag_count - blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access - blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) - blob.last_accessed_on = generated.properties.last_accessed_on - return blob - - -def parse_tags(generated_tags): - # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] - """Deserialize a list of BlobTag objects into a dict. 
- """ - if generated_tags: - tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} - return tag_dict - return None diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_download.py b/azure/multiapi/storagev2/blob/v2020_02_10/_download.py deleted file mode 100644 index 46e59e5..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_download.py +++ /dev/null @@ -1,580 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range -from ._deserialize import get_page_ranges_result - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - non_empty_ranges=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - self.non_empty_ranges = non_empty_ranges - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the 
right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _do_optimize(self, given_range_start, given_range_end): - # If we have no page range list stored, then assume there's data everywhere for that page blob - # or it's a block blob or append blob - if self.non_empty_ranges is None: - return False - - for source_range in self.non_empty_ranges: - # Case 1: As the range list is sorted, if we've reached such a source_range - # we've checked all the appropriate source_range already and haven't found any overlapping. - # so the given range doesn't have any data and download optimization could be applied. - # given range: | | - # source range: | | - if given_range_end < source_range['start']: # pylint:disable=no-else-return - return True - # Case 2: the given range comes after source_range, continue checking. - # given range: | | - # source range: | | - elif source_range['end'] < given_range_start: - pass - # Case 3: source_range and given range overlap somehow, no need to optimize. - else: - return False - # Went through all src_ranges, but nothing overlapped. Optimization will be applied. - return True - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get("modified_access_conditions"): - self.request_options["modified_access_conditions"].if_match = response.properties.etag - - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - chunk = next(self._iter_chunks) - self._current_content = self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - next = __next__ # Python 2 compatibility. - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the blob. - """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. 
- # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. 
- try: - _, response = self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - # according to the REST API documentation: - # in a highly fragmented page blob with a large number of writes, - # a Get Page Ranges request can fail due to an internal server timeout. - # thus, if the page blob is not sparse, it's ok for it to fail - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overriden by the user by specifying '*' - if self._request_options.get("modified_access_conditions"): - if not self._request_options["modified_access_conditions"].if_match: - self._request_options["modified_access_conditions"].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. 
- :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/__init__.py deleted file mode 100644 index f5c8f4a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
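A hedged usage sketch of the download surface shown above (``readall``, ``readinto`` and ``chunks``); how the ``downloader`` instance is obtained is assumed and not part of this diff::

    from io import BytesIO

    def consume(downloader):
        # readall() blocks until the whole blob is downloaded and returns bytes,
        # or str when an encoding was configured on the downloader.
        data = downloader.readall()

        # readinto() writes the same content into any writable stream and returns
        # the byte count; the stream must be seekable when more than one parallel
        # connection is used.
        buffer = BytesIO()
        written = downloader.readinto(buffer)

        # chunks() yields the content piece by piece without buffering all of it.
        pieces = list(downloader.chunks())
        return data, written, pieces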
-# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] - -from .version import VERSION - -__version__ = VERSION - diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_azure_blob_storage.py deleted file mode 100644 index 831f6ce..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_azure_blob_storage.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import PipelineClient -from msrest import Serializer, Deserializer - -from ._configuration import AzureBlobStorageConfiguration -from azure.core.exceptions import map_error -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from . import models - - -class AzureBlobStorage(object): - """AzureBlobStorage - - - :ivar service: Service operations - :vartype service: azure.storage.blob.operations.ServiceOperations - :ivar container: Container operations - :vartype container: azure.storage.blob.operations.ContainerOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.blob.operations.DirectoryOperations - :ivar blob: Blob operations - :vartype blob: azure.storage.blob.operations.BlobOperations - :ivar page_blob: PageBlob operations - :vartype page_blob: azure.storage.blob.operations.PageBlobOperations - :ivar append_blob: AppendBlob operations - :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations - :ivar block_blob: BlockBlob operations - :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. 
- :type url: str - """ - - def __init__(self, url, **kwargs): - - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2020-02-10' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - self._client.close() - def __enter__(self): - self._client.__enter__() - return self - def __exit__(self, *exc_details): - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_azure_blob_storage_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_azure_blob_storage_async.py deleted file mode 100644 index 367e296..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_azure_blob_storage_async.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import AsyncPipelineClient -from msrest import Serializer, Deserializer - -from ._configuration_async import AzureBlobStorageConfiguration -from azure.core.exceptions import map_error -from .operations_async import ServiceOperations -from .operations_async import ContainerOperations -from .operations_async import DirectoryOperations -from .operations_async import BlobOperations -from .operations_async import PageBlobOperations -from .operations_async import AppendBlobOperations -from .operations_async import BlockBlobOperations -from .. 
import models - - -class AzureBlobStorage(object): - """AzureBlobStorage - - - :ivar service: Service operations - :vartype service: azure.storage.blob.aio.operations_async.ServiceOperations - :ivar container: Container operations - :vartype container: azure.storage.blob.aio.operations_async.ContainerOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.blob.aio.operations_async.DirectoryOperations - :ivar blob: Blob operations - :vartype blob: azure.storage.blob.aio.operations_async.BlobOperations - :ivar page_blob: PageBlob operations - :vartype page_blob: azure.storage.blob.aio.operations_async.PageBlobOperations - :ivar append_blob: AppendBlob operations - :vartype append_blob: azure.storage.blob.aio.operations_async.AppendBlobOperations - :ivar block_blob: BlockBlob operations - :vartype block_blob: azure.storage.blob.aio.operations_async.BlockBlobOperations - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - """ - - def __init__( - self, url, **kwargs): - - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2020-02-10' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self): - await self._client.close() - async def __aenter__(self): - await self._client.__aenter__() - return self - async def __aexit__(self, *exc_details): - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_configuration_async.py deleted file mode 100644 index 609cb82..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/_configuration_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
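A minimal sketch of constructing the asynchronous client defined above and using it as an async context manager; the account URL is hypothetical and the import path is taken from the deleted file's location::

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_02_10._generated.aio._azure_blob_storage_async import (
        AzureBlobStorage,
    )

    async def main():
        # __aenter__/__aexit__ delegate to the underlying AsyncPipelineClient,
        # so the transport is closed when the block exits.
        async with AzureBlobStorage("https://myaccount.blob.core.windows.net/container/blob") as client:
            # Operation groups are attached as attributes, e.g. client.service,
            # client.container, client.blob, client.append_blob, client.block_blob.
            print(client.api_version)  # "2020-02-10"

    asyncio.run(main())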
-# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from ..version import VERSION - - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :ivar version: Specifies the version of the operation to use for this - request. - :type version: str - """ - - def __init__(self, url, **kwargs): - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION)) - self.generate_client_request_id = True - self.accept_language = None - - self.url = url - self.version = "2020-02-10" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/__init__.py deleted file mode 100644 index dec0519..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
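The ``_configure`` helper above takes each pipeline policy from ``kwargs`` when one is supplied and falls back to an azure-core default otherwise, so a caller can swap in its own behaviour; a sketch with a hypothetical account URL::

    from azure.core.pipeline import policies

    from azure.multiapi.storagev2.blob.v2020_02_10._generated.aio._configuration_async import (
        AzureBlobStorageConfiguration,
    )

    config = AzureBlobStorageConfiguration(
        "https://myaccount.blob.core.windows.net",
        retry_policy=policies.AsyncRetryPolicy(retry_total=3),
    )
    assert config.version == "2020-02-10"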
-# -------------------------------------------------------------------------- - -from ._service_operations_async import ServiceOperations -from ._container_operations_async import ContainerOperations -from ._directory_operations_async import DirectoryOperations -from ._blob_operations_async import BlobOperations -from ._page_blob_operations_async import PageBlobOperations -from ._append_blob_operations_async import AppendBlobOperations -from ._block_blob_operations_async import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_append_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_append_blob_operations_async.py deleted file mode 100644 index ea79827..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_append_blob_operations_async.py +++ /dev/null @@ -1,694 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class AppendBlobOperations: - """AppendBlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "AppendBlob" - - async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. 
See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, 
**path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - async def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append - Block is supported only on version 2015-02-21 version or later. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
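A hedged sketch of calling the Create Append Blob operation defined above; ``client`` is assumed to be an ``AzureBlobStorage`` instance whose URL already points at the target container and blob::

    async def create_append_blob(client):
        # An append blob is created empty, so the request body length is zero.
        await client.append_blob.create(content_length=0, timeout=30)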
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "appendblock" - - # Construct URL - url = self.append_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 
'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block.metadata = {'url': '/{containerName}/{blob}'} - - async def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob where the contents are read from a source url. The - Append Block operation is permitted only if the blob was created with - x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
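A sketch of the Append Block call defined above, assuming the same hypothetical ``client``; supplying the transactional MD5 lets the service validate the uploaded body::

    import hashlib

    async def append_chunk(client, data: bytes):
        await client.append_blob.append_block(
            body=iter([data]),                                    # streamed request body
            content_length=len(data),
            transactional_content_md5=bytearray(hashlib.md5(data).digest()),
        )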
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "appendblock" - - # Construct URL - url = self.append_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = 
self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if 
source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def seal(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, append_position_access_conditions=None, *, cls=None, **kwargs): - """The Seal operation seals the Append Blob to make it read-only. Seal is - supported only on version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
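A sketch of the Append Block From URL call defined above; the source range is hypothetical, and ``content_length`` describes this request's empty body because the bytes to append are read server-side from the copy source::

    async def append_from_url(client, source_url):
        await client.append_blob.append_block_from_url(
            source_url=source_url,
            content_length=0,
            source_range="bytes=0-1048575",  # first MiB of the source blob
        )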
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - - comp = "seal" - - # Construct URL - url = self.seal.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - seal.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_blob_operations_async.py deleted file mode 100644 index 54d6dab..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_blob_operations_async.py +++ /dev/null @@ -1,3067 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class BlobOperations: - """BlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_requires_sync: . Constant value: "true". - :ivar x_ms_copy_action: . Constant value: "abort". - :ivar restype: . Constant value: "account". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_requires_sync = "true" - self.x_ms_copy_action = "abort" - self.restype = "account" - - async def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Download operation reads or downloads a blob from the system, - including its metadata and properties. You can also call Download to - read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. 
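Before the blob download operation below, a sketch of the Seal call completed above; sealing makes an append blob read-only and, per its docstring, requires service version 2019-12-12 or later::

    async def seal_append_blob(client):
        await client.append_blob.seal(timeout=30)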
It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param range_get_content_md5: When set to true and specified together - with the Range, the service returns the MD5 hash for the range, as - long as the range is less than or equal to 4 MB in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified - together with the Range, the service returns the CRC64 hash for the - range, as long as the range is less than or equal to 4 MB in size. - :type range_get_content_crc64: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - 
header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': 
self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': 
self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} - - async def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Properties operation returns all 
user-defined metadata, - standard HTTP properties, and system properties for the blob. It does - not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')), - 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 
'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')), - 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), - 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), - 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')), - 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}/{blob}'} - - async def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """If the storage account's soft delete feature is disabled then, when a - blob is deleted, it is permanently removed from the storage account. If - the storage account's soft delete feature is enabled, then, when a blob - is deleted, it is marked for deletion and becomes inaccessible - immediately. 
However, the blob service retains the blob or snapshot for - the number of days specified by the DeleteRetentionPolicy section of - [Storage service properties] (Set-Blob-Service-Properties.md). After - the specified number of days has passed, the blob's data is permanently - removed from the storage account. Note that you continue to be charged - for the soft-deleted blob's storage until it is permanently removed. - Use the List Blobs API and specify the "include=deleted" query - parameter to discover which blobs and snapshots have been soft deleted. - You can then use the Undelete Blob API to restore a soft-deleted blob. - All other operations on a soft-deleted blob or snapshot causes the - service to return an HTTP status code of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param delete_snapshots: Required if the blob has associated - snapshots. Specify one of the following two options: include: Delete - the base blob and all of its snapshots. only: Delete only the blob's - snapshots and not the blob itself. Possible values include: 'include', - 'only' - :type delete_snapshots: str or - ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}/{blob}'} - - async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Rename a blob/file. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". 
If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = 
self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - async def undelete(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.undelete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - undelete.metadata = {'url': '/{containerName}/{blob}'} - - async def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, *, cls=None, **kwargs): - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. 
- Possible values include: 'NeverExpire', 'RelativeToCreation', - 'RelativeToNow', 'Absolute' - :type expiry_options: str or - ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param expires_on: The time to set the blob to expiry - :type expires_on: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "expiry" - - # Construct URL - url = self.set_expiry.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_expiry.metadata = {'url': '/{containerName}/{blob}'} - - async def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", 
blob_content_type, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} - - async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set Blob Metadata operation sets user-defined metadata for the - specified blob as one or more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. 
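# A minimal usage sketch, assuming `blob_ops` is a configured instance of this
# generated async BlobOperations class and `headers` is a BlobHTTPHeaders model
# (the type referenced in the docstring above); both names are assumptions.
async def apply_http_headers(blob_ops, headers):
    # Only the fields populated on the model (blob_cache_control, blob_content_type,
    # blob_content_md5, blob_content_encoding, blob_content_language,
    # blob_content_disposition) are sent as x-ms-blob-* headers; unset fields are omitted.
    await blob_ops.set_http_headers(blob_http_headers=headers)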
If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - 
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}/{blob}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. 
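# A minimal usage sketch, assuming `blob_ops` is a configured instance of this
# generated async BlobOperations class. In this layer `metadata` is typed as a
# string and placed on the x-ms-meta header as-is. The operation returns None,
# so a `cls` callback (invoked as cls(response, None, response_headers) above)
# is used to surface the x-ms-version-id response header.
async def set_blob_metadata(blob_ops, metadata):
    headers = await blob_ops.set_metadata(
        metadata=metadata,
        cls=lambda response, body, response_headers: response_headers,
    )
    return headers.get("x-ms-version-id")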
A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # 
Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
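# A minimal usage sketch, assuming `blob_ops` is a configured instance of this
# generated async BlobOperations class. The lease ID assigned by the service is
# only available through the x-ms-lease-id response header, so a `cls` callback
# is passed to receive the header dictionary.
import uuid

async def acquire_infinite_lease(blob_ops):
    proposed = str(uuid.uuid4())  # proposed lease ID must be a valid GUID string
    headers = await blob_ops.acquire_lease(
        duration=-1,                 # -1 requests a lease that never expires
        proposed_lease_id=proposed,
        cls=lambda response, body, response_headers: response_headers,
    )
    return headers["x-ms-lease-id"]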
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
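# A minimal sketch, assuming `blob_ops` is a configured instance of this
# generated async BlobOperations class: change_lease swaps the active lease ID
# for a newly proposed GUID (both IDs are required, per the signature above).
import uuid

async def rotate_lease_id(blob_ops, current_lease_id):
    new_id = str(uuid.uuid4())
    await blob_ops.change_lease(current_lease_id, new_id)
    return new_id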
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': 
self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}/{blob}'} - - async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - 
if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 
- 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} - - async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Start Copy From URL operation copies a blob or an internet resource - to a new blob. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. - Service version 2019-12-12 and newer. 
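# A minimal usage sketch, assuming `blob_ops` is a configured instance of this
# generated async BlobOperations class. The snapshot timestamp comes back in the
# x-ms-snapshot response header, so a `cls` callback is used to surface it.
async def snapshot_blob(blob_ops):
    headers = await blob_ops.create_snapshot(
        cls=lambda response, body, response_headers: response_headers,
    )
    return headers["x-ms-snapshot"]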
- :type seal_blob: bool - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - source_if_tags = None - if source_modified_access_conditions is not None: - source_if_tags = source_modified_access_conditions.source_if_tags - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if 
blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, 
modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Copy From URL operation copies a blob or an internet resource to a - new blob. It will not return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
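# A minimal usage sketch, assuming `blob_ops` is a configured instance of this
# generated async BlobOperations class and `source_url` is a public or
# SAS-authenticated blob URL (a hypothetical placeholder). The copy runs
# asynchronously on the service; its ID and initial status are returned in the
# x-ms-copy-id / x-ms-copy-status response headers.
async def start_server_side_copy(blob_ops, source_url):
    headers = await blob_ops.start_copy_from_url(
        source_url,
        tier="Cool",  # optional destination access tier
        cls=lambda response, body, response_headers: response_headers,
    )
    return headers["x-ms-copy-id"], headers["x-ms-copy-status"]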
- :type blob_tags_string: str - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - 
header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Abort Copy From URL operation aborts a pending Copy From URL - operation, and leaves a destination blob with zero length and full - metadata. 
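For orientation, the synchronous Copy From URL request that this deleted operation assembles can be reproduced directly against the REST endpoint. A minimal sketch with ``requests`` follows; the SAS URLs and the service version are placeholders, not part of this package::

    import requests

    dest_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"      # placeholder
    source_url = "https://<account>.blob.core.windows.net/<container>/<source-blob>"   # public or SAS-authenticated

    resp = requests.put(
        dest_url,
        headers={
            "x-ms-version": "2020-02-10",     # service version targeted by this generated module
            "x-ms-copy-source": source_url,   # the source blob or internet resource to copy
            "x-ms-requires-sync": "true",     # marks this as Copy From URL rather than async Copy Blob
            "Content-Length": "0",
        },
    )
    assert resp.status_code == 202            # the copy has already finished when the call returns
    print(resp.headers.get("x-ms-copy-id"), resp.headers.get("x-ms-copy-status"))
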
- - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set Tier operation sets the tier on a blob. 
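The abort path above maps onto a plain PUT with ``comp=copy`` and the copy id. A hedged sketch, with a placeholder URL and copy id and assuming the ``x-ms-copy-action`` constant is ``abort``::

    import requests

    blob_sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"   # placeholder

    resp = requests.put(
        blob_sas_url + "&comp=copy&copyid=<copy-id>",   # id returned in x-ms-copy-id by the original copy
        headers={
            "x-ms-version": "2020-02-10",
            "x-ms-copy-action": "abort",                # the single action this operation supports
            "Content-Length": "0",
        },
    )
    assert resp.status_code == 204                      # destination is left zero-length with full metadata
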
The operation is - allowed on a page blob in a premium storage account and on a block blob - in a blob storage account (locally redundant storage only). A premium - page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. Possible values - include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', - 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tier" - - # Construct URL - url = self.set_tier.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = 
self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tier.metadata = {'url': '/{containerName}/{blob}'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . - - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - 
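Set Tier is likewise a small PUT with ``comp=tier``; the tier and rehydrate priority travel as the headers built above. A sketch with placeholder values::

    import requests

    blob_sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"   # placeholder

    resp = requests.put(
        blob_sas_url + "&comp=tier",
        headers={
            "x-ms-version": "2020-02-10",
            "x-ms-access-tier": "Cool",                 # 'Hot', 'Cool', 'Archive' or a premium P* tier
            "x-ms-rehydrate-priority": "Standard",      # only relevant when moving a blob out of Archive
            "Content-Length": "0",
        },
    )
    assert resp.status_code in (200, 202)               # 202 when the tier change completes asynchronously
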
get_account_info.metadata = {'url': '/{containerName}/{blob}'} - - async def query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Query operation enables users to select/project on blob data by - providing simple query expressions. - - :param query_request: the query request - :type query_request: ~azure.storage.blob.models.QueryRequest - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "query" - - # Construct URL - url = self.query.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - 
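The Get Account Info operation above reduces to a GET with ``restype=account&comp=properties``; the SKU name and account kind come back as response headers, matching the deserialization table shown earlier. A sketch with placeholder values::

    import requests

    blob_sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"   # placeholder

    resp = requests.get(
        blob_sas_url + "&restype=account&comp=properties",
        headers={"x-ms-version": "2020-02-10"},
    )
    resp.raise_for_status()
    print(resp.headers["x-ms-sku-name"], resp.headers["x-ms-account-kind"])
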
header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 
'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', 
response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} - - async def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Tags operation enables users to get the tags associated with a - blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. 
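Because the Query response is a framed stream rather than a plain body, it is easiest to exercise through the public client that sits on top of a generated layer like this one. A sketch, assuming the azure-storage-blob v12 client and placeholder connection details::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="<container>", blob_name="data.csv")

    # Select/project over the blob's content server-side; only matching rows come back.
    reader = blob.query_blob("SELECT * FROM BlobStorage WHERE _2 > 100")
    print(reader.readall())
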
- :type version_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlobTags or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tags" - - # Construct URL - url = self.get_tags.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlobTags', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} - - async def set_tags(self, timeout=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. 
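Get Tags is a GET with ``comp=tags`` whose body is a BlobTags XML document. A sketch with placeholder values::

    import requests

    blob_sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"   # placeholder

    resp = requests.get(
        blob_sas_url + "&comp=tags",
        headers={"x-ms-version": "2020-02-10"},
    )
    resp.raise_for_status()
    print(resp.text)   # a BlobTags document: a TagSet of Key/Value pairs
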
It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param tags: Blob tags - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tags" - - # Construct URL - url = self.set_tags.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags') - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tags.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_block_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_block_blob_operations_async.py deleted file mode 100644 index e069370..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_block_blob_operations_async.py +++ /dev/null @@ -1,833 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class BlockBlobOperations: - """BlockBlobOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "BlockBlob" - - async def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Block Blob operation updates the content of an existing - block blob. Updating an existing block blob overwrites any existing - metadata on the blob. Partial updates are not supported with Put Blob; - the content of the existing blob is overwritten with the content of the - new blob. To perform a partial update of the content of a block blob, - use the Put Block List operation. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. 
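To round out the tag operations deleted above, Set Tags can be driven through the public v12 client instead of hand-building the BlobTags XML body; a sketch with placeholder connection details::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="<container>", blob_name="<blob>")

    blob.set_blob_tags({"project": "alpha", "state": "raw"})   # serialized for us as the BlobTags body
    print(blob.get_blob_tags())                                # round-trips as a plain dict
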
If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if 
modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.upload.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = 
self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload.metadata = {'url': '/{containerName}/{blob}'} - - async def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, *, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data - :type body: Generator - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. 
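The whole-blob upload above is a single PUT whose ``x-ms-blob-type: BlockBlob`` header is the constant this operation class carries; any existing blob content is overwritten. A sketch with placeholder values::

    import requests

    blob_sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"   # placeholder
    data = b"hello, world"

    resp = requests.put(
        blob_sas_url,
        data=data,
        headers={
            "x-ms-version": "2020-02-10",
            "x-ms-blob-type": "BlockBlob",                 # Put Blob; no partial updates
            "Content-Type": "application/octet-stream",
            "Content-Length": str(len(data)),
        },
    )
    assert resp.status_code == 201
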
- :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - - comp = "block" - - # Construct URL - url = self.stage_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = 
self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block.metadata = {'url': '/{containerName}/{blob}'} - - async def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob where the contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
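Stage Block uploads one uncommitted block per call; as the docstring above notes, the block id must be a Base64 string of consistent length across the blob. A sketch with placeholder values::

    import base64
    import requests
    from urllib.parse import quote

    blob_sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>"   # placeholder
    block_id = base64.b64encode(b"block-000001").decode()   # same length for every block of this blob
    chunk = b"x" * 1024

    resp = requests.put(
        blob_sas_url + "&comp=block&blockid=" + quote(block_id, safe=""),
        data=chunk,
        headers={
            "x-ms-version": "2020-02-10",
            "Content-Length": str(len(chunk)),
        },
    )
    assert resp.status_code == 201   # the block is staged but not yet part of the readable blob
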
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "block" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. 
In order to be written as part of a - blob, a block must have been successfully written to the server in a - prior Put Block operation. You can call Put Block List to update a blob - by uploading only those blocks that have changed, then committing the - new and existing blocks together. You can do this by specifying whether - to commit a block from the committed block list or from the uncommitted - block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "blocklist" - - # Construct URL - url = self.commit_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; 
charset=utf-8' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - body_content = self._serialize.body(blocks, 'BlockLookupList') - - # Construct and send request - request = 
self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} - - async def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param list_type: Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlockList or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "blocklist" - - # Construct URL - url = self.get_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlockList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, 
deserialized, header_dict) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_container_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_container_operations_async.py deleted file mode 100644 index b7e1eb8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_container_operations_async.py +++ /dev/null @@ -1,1400 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ContainerOperations: - """ContainerOperations async operations. - - You should not instantiate this class directly; instead, create a Client instance, which will create it for you and attach it as an attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - async def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, *, cls=None, **kwargs): - """creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled.
- :type request_id: str - :param container_cpk_scope_info: Additional parameters for the - operation - :type container_cpk_scope_info: - ~azure.storage.blob.models.ContainerCpkScopeInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - default_encryption_scope = None - if container_cpk_scope_info is not None: - default_encryption_scope = container_cpk_scope_info.default_encryption_scope - prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - - restype = "container" - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str') - if prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}'} - - async def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """returns all user-defined metadata and system properties for 
the - specified container. The data returned does not include the container's - list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')), - 'x-ms-has-legal-hold': self._deserialize('bool', 
response.headers.get('x-ms-has-legal-hold')), - 'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')), - 'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}'} - - async def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """operation marks the specified container for deletion. The container and - any blobs contained within it are later deleted during garbage - collection. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}'} - - async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """operation sets one or more user-defined name-value pairs for the - specified container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - - restype = "container" - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}'} - - async def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """gets the permissions for the specified container. The permissions - indicate whether container data may be accessed publicly. 
- - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} - - async def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, 
lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """sets the permissions for the specified container. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param container_acl: the acls for the container - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 
'SignedIdentifiers', 'wrapped': True}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{containerName}'} - - async def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, *, cls=None, **kwargs): - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param deleted_container_name: Optional. Version 2019-12-12 and - later. Specifies the name of the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and - later. Specifies the version of the deleted container to restore.
- :type deleted_container_version: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "undelete" - - # Construct URL - url = self.restore.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - restore.metadata = {'url': '/{containerName}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}'} - - async def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - 
if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}'} - - async def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}'} - - async def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}'} - - async def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, 
modified_access_conditions=None, *, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - 
raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}'} - - async def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsFlatSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} - - async def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. 
- - :param delimiter: When the request includes this parameter, the - operation returns a BlobPrefix element in the response body that acts - as a placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character. The - delimiter may be a single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsHierarchySegmentResponse or the result of - cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . 
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/{containerName}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py deleted file mode 100644 index 590c0f8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py +++ /dev/null @@ -1,739 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar resource: . Constant value: "directory". 
- """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "directory" - - async def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Create a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - - # Construct headers - header_parameters = {} - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: 
- header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - async def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Rename a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. 
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct 
headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 
'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - async def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the - directory will be deleted. If "false" and the directory is non-empty, - an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - async def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_page_blob_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_page_blob_operations_async.py deleted file mode 100644 index c54a27c..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_page_blob_operations_async.py +++ /dev/null @@ -1,1399 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class PageBlobOperations: - """PageBlobOperations async operations. 
- - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "PageBlob" - - async def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80' - :type tier: str or - ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
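(Illustrative usage, not part of the deleted code.) The Create operation described above corresponds to create_page_blob on the high-level async blob client in the azure-storage-blob v12 SDK. A minimal sketch with a placeholder connection string; page blob sizes must be 512-byte aligned:

    import asyncio
    from azure.storage.blob.aio import BlobServiceClient

    async def main():
        service = BlobServiceClient.from_connection_string("<connection-string>")  # placeholder
        async with service:
            blob = service.get_blob_client("mycontainer", "disk.vhd")
            # 1 MiB page blob; the size must be a multiple of 512 bytes.
            await blob.create_page_blob(size=1024 * 1024, metadata={"purpose": "demo"})

    asyncio.run(main())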
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] 
= self._serialize.header("tier", tier, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - async def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
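(Illustrative usage, not part of the deleted code.) The Upload Pages and Clear Pages operations correspond to upload_page and clear_page on the high-level async client. A sketch assuming the page blob created in the previous example and a placeholder connection string:

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", "mycontainer", "disk.vhd")  # placeholders
        async with blob:
            data = b"\x01" * 512  # writes must cover whole 512-byte pages
            await blob.upload_page(data, offset=0, length=512)
            await blob.clear_page(offset=0, length=512)

    asyncio.run(main())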
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if 
transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': 
self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages.metadata = {'url': '/{containerName}/{blob}'} - - async def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "page" - page_write = "clear" - - # Construct URL - url = self.clear_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = 
self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - clear_pages.metadata = {'url': '/{containerName}/{blob}'} - - async def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The - length of this range should match the ContentLength header and - x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be - written. The range should be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
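(Illustrative usage, not part of the deleted code.) upload_pages_from_url on the high-level async client writes a 512-byte-aligned range read directly from another blob, as the operation above describes. A sketch with placeholder names; the source URL would normally carry a SAS token:

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        dest = BlobClient.from_connection_string(
            "<connection-string>", "mycontainer", "copy.vhd")  # placeholders
        async with dest:
            await dest.create_page_blob(size=1024 * 1024)
            # Destination and source ranges must be 512-byte aligned and equal in length.
            await dest.upload_pages_from_url(
                source_url="https://source.blob.core.windows.net/src/disk.vhd?<sas>",
                offset=0, length=512, source_offset=0)

    asyncio.run(main())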
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is 
not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if 
if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} - - async def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Page Ranges operation returns the list of valid page ranges for - a page blob or snapshot of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. 
- :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} - - async def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Get Page Ranges Diff operation returns the list of valid page - ranges for a page blob that were changed between target blob and - previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The - prevsnapshot parameter is a DateTime value that specifies that the - response will contain only pages that were changed between target blob - and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot - specified by prevsnapshot is the older of the two. Note that - incremental snapshots are currently supported only for blobs created - on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in - service versions 2019-04-19 and after and specifies the URL of a - previous snapshot of the target blob. The response will only contain - pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
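(Illustrative usage, not part of the deleted code.) Get Page Ranges and Get Page Ranges Diff are exposed through get_page_ranges on the high-level async client. A sketch with placeholder names; the exact return shape, a pair of valid and cleared ranges, may differ slightly between azure-storage-blob releases:

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", "mycontainer", "disk.vhd")  # placeholders
        async with blob:
            snapshot = await blob.create_snapshot()
            await blob.upload_page(b"\x02" * 512, offset=0, length=512)
            # All valid ranges, then only the ranges changed since the snapshot.
            ranges, cleared = await blob.get_page_ranges()
            changed, cleared_since = await blob.get_page_ranges(
                previous_snapshot_diff=snapshot)
            print(ranges, changed)

    asyncio.run(main())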
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and 
send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} - - async def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.resize.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') 
- if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - resize.metadata = {'url': '/{containerName}/{blob}'} - - async def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the - x-ms-blob-sequence-number header is set for the request. This property - applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. Possible values include: - 'max', 'update', 'increment' - :type sequence_number_action: str or - ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.update_sequence_number.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} - - async def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): - """The Copy Incremental operation copies a snapshot of the source page - blob to a destination page blob. The snapshot is copied such that only - the differential changes between the previously copied snapshot are - transferred to the destination. The copied snapshots are complete - copies of the original snapshot and can be read or copied from as - usual. This API is supported since REST version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "incrementalcopy" - - # Construct URL - url = self.copy_incremental.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py deleted file mode 100644 index e12c2b9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py +++ /dev/null @@ -1,664 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - async def set_properties(self, storage_service_properties, timeout=None, request_id=None, *, cls=None, **kwargs): - """Sets properties for a storage account's Blob service endpoint, - including properties for Storage Analytics and CORS (Cross-Origin - Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """gets the properties of a storage account's Blob service, including - properties for Storage Analytics and CORS (Cross-Origin Resource - Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """Retrieves statistics related to replication for the Blob service. It is - only available on the secondary location endpoint when read-access - geo-redundant replication is enabled for the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceStats or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceStats', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/'} - - async def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """The List Containers Segment operation returns a list of the containers - under the specified account. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. 
Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's - metadata be returned as part of the response body. - :type include: list[str or - ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListContainersSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_containers_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListContainersSegmentResponse', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return 
cls(response, deserialized, header_dict) - - return deserialized - list_containers_segment.metadata = {'url': '/'} - - async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, *, cls=None, **kwargs): - """Retrieves a user delegation key for the Blob service. This is only a - valid operation when using bearer token authentication. - - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: UserDelegationKey or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "userdelegationkey" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(key_info, 'KeyInfo') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UserDelegationKey', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . 
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/'} - - async def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, *, cls=None, **kwargs): - """The Batch operation allows multiple API calls to be embedded into a - single HTTP request. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must - be multipart/mixed with a batch boundary. Example header value: - multipart/mixed; boundary=batch_ - :type multipart_content_type: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "batch" - - # Construct URL - url = self.submit_batch.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - submit_batch.metadata = {'url': '/'} - - async def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, *, cls=None, **kwargs): - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param where: Filters the results to return only to return only blobs - whose tags match the specified expression. 
- :type where: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: FilterBlobSegment or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "blobs" - - # Construct URL - url = self.filter_blobs.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FilterBlobSegment', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', 
response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - filter_blobs.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/__init__.py deleted file mode 100644 index 3a6f8ed..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/__init__.py +++ /dev/null @@ -1,229 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import AppendPositionAccessConditions - from ._models_py3 import ArrowConfiguration - from ._models_py3 import ArrowField - from ._models_py3 import BlobFlatListSegment - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobHTTPHeaders - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobMetadata - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import BlobTag - from ._models_py3 import BlobTags - from ._models_py3 import Block - from ._models_py3 import BlockList - from ._models_py3 import BlockLookupList - from ._models_py3 import ClearRange - from ._models_py3 import ContainerCpkScopeInfo - from ._models_py3 import ContainerItem - from ._models_py3 import ContainerProperties - from ._models_py3 import CorsRule - from ._models_py3 import CpkInfo - from ._models_py3 import CpkScopeInfo - from ._models_py3 import DataLakeStorageError, DataLakeStorageErrorException - from ._models_py3 import DataLakeStorageErrorError - from ._models_py3 import DelimitedTextConfiguration - from ._models_py3 import DirectoryHttpHeaders - from ._models_py3 import FilterBlobItem - from ._models_py3 import FilterBlobSegment - from ._models_py3 import GeoReplication - from ._models_py3 import JsonTextConfiguration - from ._models_py3 import KeyInfo - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsFlatSegmentResponse - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ListContainersSegmentResponse - from ._models_py3 import Logging - from ._models_py3 import Metrics - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import PageList - from ._models_py3 import PageRange - from ._models_py3 import QueryFormat - from ._models_py3 import QueryRequest - from ._models_py3 import QuerySerialization - from ._models_py3 import RetentionPolicy - from ._models_py3 import SequenceNumberAccessConditions - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StaticWebsite - from ._models_py3 import StorageError, StorageErrorException - from ._models_py3 import StorageServiceProperties - from ._models_py3 import StorageServiceStats - from ._models_py3 import UserDelegationKey -except (SyntaxError, ImportError): - from ._models import AccessPolicy - from ._models import AppendPositionAccessConditions - from ._models import ArrowConfiguration 
- from ._models import ArrowField - from ._models import BlobFlatListSegment - from ._models import BlobHierarchyListSegment - from ._models import BlobHTTPHeaders - from ._models import BlobItemInternal - from ._models import BlobMetadata - from ._models import BlobPrefix - from ._models import BlobPropertiesInternal - from ._models import BlobTag - from ._models import BlobTags - from ._models import Block - from ._models import BlockList - from ._models import BlockLookupList - from ._models import ClearRange - from ._models import ContainerCpkScopeInfo - from ._models import ContainerItem - from ._models import ContainerProperties - from ._models import CorsRule - from ._models import CpkInfo - from ._models import CpkScopeInfo - from ._models import DataLakeStorageError, DataLakeStorageErrorException - from ._models import DataLakeStorageErrorError - from ._models import DelimitedTextConfiguration - from ._models import DirectoryHttpHeaders - from ._models import FilterBlobItem - from ._models import FilterBlobSegment - from ._models import GeoReplication - from ._models import JsonTextConfiguration - from ._models import KeyInfo - from ._models import LeaseAccessConditions - from ._models import ListBlobsFlatSegmentResponse - from ._models import ListBlobsHierarchySegmentResponse - from ._models import ListContainersSegmentResponse - from ._models import Logging - from ._models import Metrics - from ._models import ModifiedAccessConditions - from ._models import PageList - from ._models import PageRange - from ._models import QueryFormat - from ._models import QueryRequest - from ._models import QuerySerialization - from ._models import RetentionPolicy - from ._models import SequenceNumberAccessConditions - from ._models import SignedIdentifier - from ._models import SourceModifiedAccessConditions - from ._models import StaticWebsite - from ._models import StorageError, StorageErrorException - from ._models import StorageServiceProperties - from ._models import StorageServiceStats - from ._models import UserDelegationKey -from ._azure_blob_storage_enums import ( - AccessTier, - AccessTierOptional, - AccessTierRequired, - AccountKind, - ArchiveStatus, - BlobExpiryOptions, - BlobType, - BlockListType, - CopyStatusType, - DeleteSnapshotsOptionType, - EncryptionAlgorithmType, - GeoReplicationStatusType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListBlobsIncludeItem, - ListContainersIncludeType, - PathRenameMode, - PremiumPageBlobAccessTier, - PublicAccessType, - QueryFormatType, - RehydratePriority, - SequenceNumberActionType, - SkuName, - StorageErrorCode, - SyncCopyStatusType, -) - -__all__ = [ - 'AccessPolicy', - 'AppendPositionAccessConditions', - 'ArrowConfiguration', - 'ArrowField', - 'BlobFlatListSegment', - 'BlobHierarchyListSegment', - 'BlobHTTPHeaders', - 'BlobItemInternal', - 'BlobMetadata', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'BlobTag', - 'BlobTags', - 'Block', - 'BlockList', - 'BlockLookupList', - 'ClearRange', - 'ContainerCpkScopeInfo', - 'ContainerItem', - 'ContainerProperties', - 'CorsRule', - 'CpkInfo', - 'CpkScopeInfo', - 'DataLakeStorageError', 'DataLakeStorageErrorException', - 'DataLakeStorageErrorError', - 'DelimitedTextConfiguration', - 'DirectoryHttpHeaders', - 'FilterBlobItem', - 'FilterBlobSegment', - 'GeoReplication', - 'JsonTextConfiguration', - 'KeyInfo', - 'LeaseAccessConditions', - 'ListBlobsFlatSegmentResponse', - 'ListBlobsHierarchySegmentResponse', - 'ListContainersSegmentResponse', - 'Logging', - 'Metrics', - 
'ModifiedAccessConditions', - 'PageList', - 'PageRange', - 'QueryFormat', - 'QueryRequest', - 'QuerySerialization', - 'RetentionPolicy', - 'SequenceNumberAccessConditions', - 'SignedIdentifier', - 'SourceModifiedAccessConditions', - 'StaticWebsite', - 'StorageError', 'StorageErrorException', - 'StorageServiceProperties', - 'StorageServiceStats', - 'UserDelegationKey', - 'PublicAccessType', - 'CopyStatusType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'AccessTier', - 'ArchiveStatus', - 'BlobType', - 'RehydratePriority', - 'StorageErrorCode', - 'GeoReplicationStatusType', - 'QueryFormatType', - 'AccessTierRequired', - 'AccessTierOptional', - 'PremiumPageBlobAccessTier', - 'BlobExpiryOptions', - 'BlockListType', - 'DeleteSnapshotsOptionType', - 'EncryptionAlgorithmType', - 'ListBlobsIncludeItem', - 'ListContainersIncludeType', - 'PathRenameMode', - 'SequenceNumberActionType', - 'SkuName', - 'AccountKind', - 'SyncCopyStatusType', -] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_azure_blob_storage_enums.py deleted file mode 100644 index e45eea3..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_azure_blob_storage_enums.py +++ /dev/null @@ -1,343 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum - - -class PublicAccessType(str, Enum): - - container = "container" - blob = "blob" - - -class CopyStatusType(str, Enum): - - pending = "pending" - success = "success" - aborted = "aborted" - failed = "failed" - - -class LeaseDurationType(str, Enum): - - infinite = "infinite" - fixed = "fixed" - - -class LeaseStateType(str, Enum): - - available = "available" - leased = "leased" - expired = "expired" - breaking = "breaking" - broken = "broken" - - -class LeaseStatusType(str, Enum): - - locked = "locked" - unlocked = "unlocked" - - -class AccessTier(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class ArchiveStatus(str, Enum): - - rehydrate_pending_to_hot = "rehydrate-pending-to-hot" - rehydrate_pending_to_cool = "rehydrate-pending-to-cool" - - -class BlobType(str, Enum): - - block_blob = "BlockBlob" - page_blob = "PageBlob" - append_blob = "AppendBlob" - - -class RehydratePriority(str, Enum): - - high = "High" - standard = "Standard" - - -class StorageErrorCode(str, Enum): - - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = 
"InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = 
"LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_authentication_information = "NoAuthenticationInformation" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch" - authorization_protocol_mismatch = "AuthorizationProtocolMismatch" - authorization_permission_mismatch = "AuthorizationPermissionMismatch" - authorization_service_mismatch = "AuthorizationServiceMismatch" - authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch" - - -class GeoReplicationStatusType(str, Enum): - - live = "live" - bootstrap = "bootstrap" - unavailable = "unavailable" - - -class QueryFormatType(str, Enum): - - delimited = "delimited" - json = "json" - arrow = "arrow" - - -class AccessTierRequired(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class AccessTierOptional(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - hot = "Hot" - cool = "Cool" - archive = "Archive" - - -class PremiumPageBlobAccessTier(str, Enum): - - p4 = "P4" - p6 = "P6" - p10 = "P10" - p15 = "P15" - p20 = "P20" - p30 = "P30" - p40 = "P40" - p50 = "P50" - p60 = "P60" - p70 = "P70" - p80 = "P80" - - -class BlobExpiryOptions(str, Enum): - - never_expire = "NeverExpire" - relative_to_creation = "RelativeToCreation" - relative_to_now = "RelativeToNow" - absolute = "Absolute" - - -class BlockListType(str, Enum): - - committed = "committed" - uncommitted = "uncommitted" - all = "all" - - -class DeleteSnapshotsOptionType(str, Enum): - - include = "include" - only = "only" - - -class EncryptionAlgorithmType(str, Enum): - - aes256 = "AES256" - - -class ListBlobsIncludeItem(str, Enum): - - copy = "copy" - deleted = "deleted" - 
metadata = "metadata" - snapshots = "snapshots" - uncommittedblobs = "uncommittedblobs" - versions = "versions" - tags = "tags" - - -class ListContainersIncludeType(str, Enum): - - metadata = "metadata" - deleted = "deleted" - - -class PathRenameMode(str, Enum): - - legacy = "legacy" - posix = "posix" - - -class SequenceNumberActionType(str, Enum): - - max = "max" - update = "update" - increment = "increment" - - -class SkuName(str, Enum): - - standard_lrs = "Standard_LRS" - standard_grs = "Standard_GRS" - standard_ragrs = "Standard_RAGRS" - standard_zrs = "Standard_ZRS" - premium_lrs = "Premium_LRS" - - -class AccountKind(str, Enum): - - storage = "Storage" - blob_storage = "BlobStorage" - storage_v2 = "StorageV2" - file_storage = "FileStorage" - block_blob_storage = "BlockBlobStorage" - - -class SyncCopyStatusType(str, Enum): - - success = "success" diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models.py deleted file mode 100644 index 1fdddbe..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models.py +++ /dev/null @@ -1,2009 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: the date-time the policy is active - :type start: str - :param expiry: the date-time the policy expires - :type expiry: str - :param permission: the permissions for the acl policy - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class AppendPositionAccessConditions(Model): - """Additional parameters for a set of operations, such as: - AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal. - - :param max_size: Optional conditional header. The max length in bytes - permitted for the append blob. If the Append Block operation would cause - the blob to exceed that limit or if the blob size is already greater than - the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the - Append Block operation. A number indicating the byte offset to compare. - Append Block will succeed only if the append position is equal to this - number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition - Failed). 
- :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}}, - 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = kwargs.get('max_size', None) - self.append_position = kwargs.get('append_position', None) - - -class ArrowConfiguration(Model): - """arrow configuration. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. - :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'itemsName': 'Schema', 'wrapped': True}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__(self, **kwargs): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = kwargs.get('schema', None) - - -class ArrowField(Model): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. - :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'precision': {'key': 'Precision', 'type': 'int', 'xml': {'name': 'Precision'}}, - 'scale': {'key': 'Scale', 'type': 'int', 'xml': {'name': 'Scale'}}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__(self, **kwargs): - super(ArrowField, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.name = kwargs.get('name', None) - self.precision = kwargs.get('precision', None) - self.scale = kwargs.get('scale', None) - - -class BlobFlatListSegment(Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, **kwargs): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = kwargs.get('blob_items', None) - - -class BlobHierarchyListSegment(Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. 
- :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, **kwargs): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs.get('blob_items', None) - - -class BlobHTTPHeaders(Model): - """Additional parameters for a set of operations. - - :param blob_cache_control: Optional. Sets the blob's cache control. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note - that this hash is not validated, as the hashes for the individual blocks - were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's - Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}}, - 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}}, - 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}}, - 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}}, - 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}}, - 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = kwargs.get('blob_cache_control', None) - self.blob_content_type = kwargs.get('blob_content_type', None) - self.blob_content_md5 = kwargs.get('blob_content_md5', None) - self.blob_content_encoding = kwargs.get('blob_content_encoding', None) - self.blob_content_language = kwargs.get('blob_content_language', None) - self.blob_content_disposition = kwargs.get('blob_content_disposition', None) - - -class BlobItemInternal(Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. 
- :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: - :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, **kwargs): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.deleted = kwargs.get('deleted', None) - self.snapshot = kwargs.get('snapshot', None) - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - self.blob_tags = kwargs.get('blob_tags', None) - self.object_replication_metadata = kwargs.get('object_replication_metadata', None) - - -class BlobMetadata(Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are - deserialized this collection - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__(self, **kwargs): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = kwargs.get('additional_properties', None) - self.encrypted = kwargs.get('encrypted', None) - - -class BlobPrefix(Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - - -class BlobPropertiesInternal(Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: datetime - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param content_length: Size in bytes - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: 'BlockBlob', 'PageBlob', - 'AppendBlob' - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: 'pending', 'success', - 'aborted', 'failed' - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15', - 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: - 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool' - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the - blob is encrypted. 
- :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: Possible values include: 'High', 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}}, - 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}}, - 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}}, - 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}}, - 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 
'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, - 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123', 'xml': {'name': 'LastAccessTime'}}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__(self, **kwargs): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.blob_type = kwargs.get('blob_type', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_status = kwargs.get('copy_status', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.archive_status = kwargs.get('archive_status', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.rehydrate_priority = kwargs.get('rehydrate_priority', None) - self.last_accessed_on = kwargs.get('last_accessed_on', None) - - -class BlobTag(Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. 
- :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__(self, **kwargs): - super(BlobTag, self).__init__(**kwargs) - self.key = kwargs.get('key', None) - self.value = kwargs.get('value', None) - - -class BlobTags(Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__(self, **kwargs): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = kwargs.get('blob_tag_set', None) - - -class Block(Model): - """Represents a single block in a block blob. It describes the block's ID and - size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Block, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.size = kwargs.get('size', None) - - -class BlockList(Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = kwargs.get('committed_blocks', None) - self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) - - -class BlockLookupList(Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__(self, **kwargs): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = kwargs.get('committed', None) - self.uncommitted = kwargs.get('uncommitted', None) - self.latest = kwargs.get('latest', None) - - -class ClearRange(Model): - """ClearRange. 
- - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__(self, **kwargs): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class ContainerCpkScopeInfo(Model): - """Additional parameters for create operation. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. - Specifies the default encryption scope to set on the container and use for - all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 - and newer. If true, prevents any request from specifying a different - encryption scope than the scope set on the container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}}, - 'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - - -class ContainerItem(Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__(self, **kwargs): - super(ContainerItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - - -class ContainerProperties(Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: 'container', 'blob' - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.public_access = kwargs.get('public_access', None) - self.has_immutability_policy = kwargs.get('has_immutability_policy', None) - self.has_legal_hold = kwargs.get('has_legal_hold', None) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. 
Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs.get('allowed_origins', None) - self.allowed_methods = kwargs.get('allowed_methods', None) - self.allowed_headers = kwargs.get('allowed_headers', None) - self.exposed_headers = kwargs.get('exposed_headers', None) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) - - -class CpkInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_key: Optional. Specifies the encryption key to use to - encrypt the data provided in the request. If not specified, encryption is - performed with the root account encryption key. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption - key. Must be provided if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption - key hash. Currently, the only accepted value is "AES256". Must be provided - if the x-ms-encryption-key header is provided. 
Possible values include: - 'AES256' - :type encryption_algorithm: str or - ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}}, - 'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}}, - 'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = kwargs.get('encryption_key', None) - self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) - self.encryption_algorithm = kwargs.get('encryption_algorithm', None) - - -class CpkScopeInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_scope: Optional. Version 2019-07-07 and later. - Specifies the name of the encryption scope to use to encrypt the data - provided in the request. If not specified, encryption is performed with - the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = kwargs.get('encryption_scope', None) - - -class DataLakeStorageError(Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: - ~azure.storage.blob.models.DataLakeStorageErrorError - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None) - - -class DataLakeStorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'DataLakeStorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'DataLakeStorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(DataLakeStorageErrorException, self).__init__(response=response) - - -class DataLakeStorageErrorError(Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}}, - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DataLakeStorageErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - - -class DelimitedTextConfiguration(Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator - :type column_separator: str - :param field_quote: Required. field quote - :type field_quote: str - :param record_separator: Required. 
record separator - :type record_separator: str - :param escape_char: Required. escape char - :type escape_char: str - :param headers_present: Required. has headers - :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__(self, **kwargs): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = kwargs.get('column_separator', None) - self.field_quote = kwargs.get('field_quote', None) - self.record_separator = kwargs.get('record_separator', None) - self.escape_char = kwargs.get('escape_char', None) - self.headers_present = kwargs.get('headers_present', None) - - -class DirectoryHttpHeaders(Model): - """Additional parameters for a set of operations, such as: Directory_create, - Directory_rename, Blob_rename. - - :param cache_control: Cache control for given resource - :type cache_control: str - :param content_type: Content type for given resource - :type content_type: str - :param content_encoding: Content encoding for given resource - :type content_encoding: str - :param content_language: Content language for given resource - :type content_language: str - :param content_disposition: Content disposition for given resource - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}}, - 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}}, - 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}}, - 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}}, - 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - - -class FilterBlobItem(Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tag_value: Required. 
- :type tag_value: str - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - 'tag_value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}}, - 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, **kwargs): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.container_name = kwargs.get('container_name', None) - self.tag_value = kwargs.get('tag_value', None) - - -class FilterBlobSegment(Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.where = kwargs.get('where', None) - self.blobs = kwargs.get('blobs', None) - self.next_marker = kwargs.get('next_marker', None) - - -class GeoReplication(Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible - values include: 'live', 'bootstrap', 'unavailable' - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All - primary writes preceding this value are guaranteed to be available for - read operations at the secondary. Primary writes after this point in time - may or may not be available for reads. - :type last_sync_time: datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(GeoReplication, self).__init__(**kwargs) - self.status = kwargs.get('status', None) - self.last_sync_time = kwargs.get('last_sync_time', None) - - -class JsonTextConfiguration(Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. 
record separator - :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__(self, **kwargs): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = kwargs.get('record_separator', None) - - -class KeyInfo(Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC - time - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC - time - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(KeyInfo, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsFlatSegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.container_name = kwargs.get('container_name', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListBlobsHierarchySegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.container_name = kwargs.get('container_name', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListContainersSegmentResponse(Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.container_items = kwargs.get('container_items', None) - self.next_marker = kwargs.get('next_marker', None) - - -class Logging(Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. 
Indicates whether all delete requests should be - logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be - logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be - logged. - :type write: bool - :param retention_policy: Required. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, - 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, - 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Logging, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.delete = kwargs.get('delete', None) - self.read = kwargs.get('read', None) - self.write = kwargs.get('write', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class Metrics(Model): - """a summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs.get('enabled', None) - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class ModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param if_modified_since: Specify this header value to operate only on a - blob if it has been modified since the specified date/time. - :type if_modified_since: datetime - :param if_unmodified_since: Specify this header value to operate only on a - blob if it has not been modified since the specified date/time. - :type if_unmodified_since: datetime - :param if_match: Specify an ETag value to operate only on blobs with a - matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs - without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on - blobs with a matching value. 
- :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}}, - 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}}, - 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}}, - 'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_tags = kwargs.get('if_tags', None) - - -class PageList(Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(PageList, self).__init__(**kwargs) - self.page_range = kwargs.get('page_range', None) - self.clear_range = kwargs.get('clear_range', None) - - -class PageRange(Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__(self, **kwargs): - super(PageRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class QueryFormat(Model): - """QueryFormat. 
- - :param type: Possible values include: 'delimited', 'json', 'arrow' - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: - :type delimited_text_configuration: - ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: - :type json_text_configuration: - ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration', 'xml': {'name': 'ArrowConfiguration'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(QueryFormat, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) - self.json_text_configuration = kwargs.get('json_text_configuration', None) - self.arrow_configuration = kwargs.get('arrow_configuration', None) - - -class QueryRequest(Model): - """the quick query body. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL" . - :vartype query_type: str - :param expression: Required. a query statement - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__(self, **kwargs): - super(QueryRequest, self).__init__(**kwargs) - self.expression = kwargs.get('expression', None) - self.input_serialization = kwargs.get('input_serialization', None) - self.output_serialization = kwargs.get('output_serialization', None) - - -class QuerySerialization(Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. 
- :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(QuerySerialization, self).__init__(**kwargs) - self.format = kwargs.get('format', None) - - -class RetentionPolicy(Model): - """the retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the storage service - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.days = kwargs.get('days', None) - - -class SequenceNumberAccessConditions(Model): - """Additional parameters for a set of operations, such as: - PageBlob_upload_pages, PageBlob_clear_pages, - PageBlob_upload_pages_from_url. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value - to operate only on a blob if it has a sequence number less than or equal - to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate - only on a blob if it has a sequence number less than the specified. - :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate - only on a blob if it has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}}, - 'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}}, - 'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None) - self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None) - self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None) - - -class SignedIdentifier(Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. 
a unique id - :type id: str - :param access_policy: - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__(self, **kwargs): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param source_if_modified_since: Specify this header value to operate only - on a blob if it has been modified since the specified date/time. - :type source_if_modified_since: datetime - :param source_if_unmodified_since: Specify this header value to operate - only on a blob if it has not been modified since the specified date/time. - :type source_if_unmodified_since: datetime - :param source_if_match: Specify an ETag value to operate only on blobs - with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on - blobs without a matching value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate - only on blobs with a matching value. - :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}}, - 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}}, - 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}}, - 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}}, - 'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_tags = kwargs.get('source_if_tags', None) - - -class StaticWebsite(Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. 
Indicates whether this account is hosting a - static website - :type enabled: bool - :param index_document: The default name of the index page under each - directory - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index - page - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.index_document = kwargs.get('index_document', None) - self.error_document404_path = kwargs.get('error_document404_path', None) - self.default_index_document_path = kwargs.get('default_index_document_path', None) - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageErrorException(HttpResponseError): - """Server responded with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage Service Properties. - - :param logging: - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to - the Blob service if an incoming request's version is not specified.
- Possible values include version 2008-10-27 and all more recent versions - :type default_service_version: str - :param delete_retention_policy: - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = kwargs.get('logging', None) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.default_service_version = kwargs.get('default_service_version', None) - self.delete_retention_policy = kwargs.get('delete_retention_policy', None) - self.static_website = kwargs.get('static_website', None) - - -class StorageServiceStats(Model): - """Stats for the storage service. - - :param geo_replication: - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = kwargs.get('geo_replication', None) - - -class UserDelegationKey(Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID - format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID - format - :type signed_tid: str - :param signed_start: Required. The date-time the key is active - :type signed_start: datetime - :param signed_expiry: Required. The date-time the key expires - :type signed_expiry: datetime - :param signed_service: Required. Abbreviation of the Azure Storage service - that accepts the key - :type signed_service: str - :param signed_version: Required. The service version that created the key - :type signed_version: str - :param value: Required. 
The key as a base64 string - :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}}, - 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = kwargs.get('signed_oid', None) - self.signed_tid = kwargs.get('signed_tid', None) - self.signed_start = kwargs.get('signed_start', None) - self.signed_expiry = kwargs.get('signed_expiry', None) - self.signed_service = kwargs.get('signed_service', None) - self.signed_version = kwargs.get('signed_version', None) - self.value = kwargs.get('value', None) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models_py3.py deleted file mode 100644 index 7e5a3fc..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/models/_models_py3.py +++ /dev/null @@ -1,2009 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: the date-time the policy is active - :type start: str - :param expiry: the date-time the policy expires - :type expiry: str - :param permission: the permissions for the acl policy - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None: - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class AppendPositionAccessConditions(Model): - """Additional parameters for a set of operations, such as: - AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal. - - :param max_size: Optional conditional header. The max length in bytes - permitted for the append blob. 
If the Append Block operation would cause - the blob to exceed that limit or if the blob size is already greater than - the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the - Append Block operation. A number indicating the byte offset to compare. - Append Block will succeed only if the append position is equal to this - number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition - Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': '', 'type': 'long', 'xml': {'name': 'max_size'}}, - 'append_position': {'key': '', 'type': 'long', 'xml': {'name': 'append_position'}}, - } - _xml_map = { - } - - def __init__(self, *, max_size: int=None, append_position: int=None, **kwargs) -> None: - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = max_size - self.append_position = append_position - - -class ArrowConfiguration(Model): - """arrow configuration. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. - :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'itemsName': 'Schema', 'wrapped': True}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__(self, *, schema, **kwargs) -> None: - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = schema - - -class ArrowField(Model): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. - :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'precision': {'key': 'Precision', 'type': 'int', 'xml': {'name': 'Precision'}}, - 'scale': {'key': 'Scale', 'type': 'int', 'xml': {'name': 'Scale'}}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__(self, *, type: str, name: str=None, precision: int=None, scale: int=None, **kwargs) -> None: - super(ArrowField, self).__init__(**kwargs) - self.type = type - self.name = name - self.precision = precision - self.scale = scale - - -class BlobFlatListSegment(Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, *, blob_items, **kwargs) -> None: - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = blob_items - - -class BlobHierarchyListSegment(Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. 
- - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__(self, *, blob_items, blob_prefixes=None, **kwargs) -> None: - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobHTTPHeaders(Model): - """Additional parameters for a set of operations. - - :param blob_cache_control: Optional. Sets the blob's cache control. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If - specified, this property is stored with the blob and returned with a read - request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note - that this hash is not validated, as the hashes for the individual blocks - were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. - If specified, this property is stored with the blob and returned with a - read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's - Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'blob_cache_control'}}, - 'blob_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_type'}}, - 'blob_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'blob_content_md5'}}, - 'blob_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_encoding'}}, - 'blob_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_language'}}, - 'blob_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'blob_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, blob_cache_control: str=None, blob_content_type: str=None, blob_content_md5: bytearray=None, blob_content_encoding: str=None, blob_content_language: str=None, blob_content_disposition: str=None, **kwargs) -> None: - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = blob_cache_control - self.blob_content_type = blob_content_type - self.blob_content_md5 = blob_content_md5 - self.blob_content_encoding = blob_content_encoding - self.blob_content_language = blob_content_language - self.blob_content_disposition = blob_content_disposition - - -class BlobItemInternal(Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. 
- :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. - :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: - :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}', 'xml': {'name': 'OrMetadata'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, *, name: str, deleted: bool, snapshot: str, properties, version_id: str=None, is_current_version: bool=None, metadata=None, blob_tags=None, object_replication_metadata=None, **kwargs) -> None: - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.metadata = metadata - self.blob_tags = blob_tags - self.object_replication_metadata = object_replication_metadata - - -class BlobMetadata(Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are - deserialized this collection - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__(self, *, additional_properties=None, encrypted: str=None, **kwargs) -> None: - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.encrypted = encrypted - - -class BlobPrefix(Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - } - - def __init__(self, *, name: str, **kwargs) -> None: - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: datetime - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param content_length: Size in bytes - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: 'BlockBlob', 'PageBlob', - 'AppendBlob' - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: 'pending', 'success', - 'aborted', 'failed' - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: 'P4', 'P6', 'P10', 'P15', - 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: - 'rehydrate-pending-to-hot', 'rehydrate-pending-to-cool' - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the - blob is encrypted. 
- :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: Possible values include: 'High', 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123', 'xml': {'name': 'Creation-Time'}}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - 'content_type': {'key': 'Content-Type', 'type': 'str', 'xml': {'name': 'Content-Type'}}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str', 'xml': {'name': 'Content-Encoding'}}, - 'content_language': {'key': 'Content-Language', 'type': 'str', 'xml': {'name': 'Content-Language'}}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray', 'xml': {'name': 'Content-MD5'}}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str', 'xml': {'name': 'Content-Disposition'}}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str', 'xml': {'name': 'Cache-Control'}}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long', 'xml': {'name': 'x-ms-blob-sequence-number'}}, - 'blob_type': {'key': 'BlobType', 'type': 'BlobType', 'xml': {'name': 'BlobType'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'copy_id': {'key': 'CopyId', 'type': 'str', 'xml': {'name': 'CopyId'}}, - 'copy_status': {'key': 'CopyStatus', 'type': 'CopyStatusType', 'xml': {'name': 'CopyStatus'}}, - 'copy_source': {'key': 'CopySource', 'type': 'str', 'xml': {'name': 'CopySource'}}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str', 'xml': {'name': 'CopyProgress'}}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123', 'xml': {'name': 'CopyCompletionTime'}}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str', 'xml': {'name': 'CopyStatusDescription'}}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool', 'xml': {'name': 'ServerEncrypted'}}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool', 'xml': {'name': 'IncrementalCopy'}}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str', 'xml': {'name': 'DestinationSnapshot'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - 'access_tier': {'key': 'AccessTier', 'type': 'str', 'xml': {'name': 'AccessTier'}}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool', 'xml': {'name': 'AccessTierInferred'}}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str', 'xml': {'name': 'ArchiveStatus'}}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 
'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, - 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool', 'xml': {'name': 'Sealed'}}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str', 'xml': {'name': 'RehydratePriority'}}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123', 'xml': {'name': 'LastAccessTime'}}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, tag_count: int=None, expires_on=None, is_sealed: bool=None, rehydrate_priority=None, last_accessed_on=None, **kwargs) -> None: - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.blob_type = blob_type - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.copy_id = copy_id - self.copy_status = copy_status - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_inferred = access_tier_inferred - self.archive_status = archive_status - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.rehydrate_priority = rehydrate_priority - self.last_accessed_on = last_accessed_on - - -class BlobTag(Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. 
- :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__(self, *, key: str, value: str, **kwargs) -> None: - super(BlobTag, self).__init__(**kwargs) - self.key = key - self.value = value - - -class BlobTags(Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__(self, *, blob_tag_set, **kwargs) -> None: - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = blob_tag_set - - -class Block(Model): - """Represents a single block in a block blob. It describes the block's ID and - size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'size': {'key': 'Size', 'type': 'int', 'xml': {'name': 'Size'}}, - } - _xml_map = { - } - - def __init__(self, *, name: str, size: int, **kwargs) -> None: - super(Block, self).__init__(**kwargs) - self.name = name - self.size = size - - -class BlockList(Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'name': 'CommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'name': 'UncommittedBlocks', 'itemsName': 'Block', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, *, committed_blocks=None, uncommitted_blocks=None, **kwargs) -> None: - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = committed_blocks - self.uncommitted_blocks = uncommitted_blocks - - -class BlockLookupList(Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'name': 'Committed', 'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'name': 'Uncommitted', 'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'name': 'Latest', 'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__(self, *, committed=None, uncommitted=None, latest=None, **kwargs) -> None: - super(BlockLookupList, self).__init__(**kwargs) - self.committed = committed - self.uncommitted = uncommitted - self.latest = latest - - -class ClearRange(Model): - """ClearRange. 
- - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class ContainerCpkScopeInfo(Model): - """Additional parameters for create operation. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. - Specifies the default encryption scope to set on the container and use for - all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 - and newer. If true, prevents any request from specifying a different - encryption scope than the scope set on the container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'default_encryption_scope'}}, - 'prevent_encryption_scope_override': {'key': '', 'type': 'bool', 'xml': {'name': 'prevent_encryption_scope_override'}}, - } - _xml_map = { - } - - def __init__(self, *, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, **kwargs) -> None: - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - - -class ContainerItem(Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__(self, *, name: str, properties, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None: - super(ContainerItem, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class ContainerProperties(Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. 
- :type etag: str - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: 'container', 'blob' - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - 'public_access': {'key': 'PublicAccess', 'type': 'str', 'xml': {'name': 'PublicAccess'}}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool', 'xml': {'name': 'HasImmutabilityPolicy'}}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - } - _xml_map = { - } - - def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=None, lease_duration=None, public_access=None, has_immutability_policy: bool=None, has_legal_hold: bool=None, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, deleted_time=None, remaining_retention_days: int=None, **kwargs) -> None: - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.public_access = public_access - self.has_immutability_policy = has_immutability_policy - self.has_legal_hold = has_legal_hold - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. 
Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user agent sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount of time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class CpkInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_key: Optional. Specifies the encryption key to use to - encrypt the data provided in the request. If not specified, encryption is - performed with the root account encryption key. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption - key. Must be provided if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption - key hash. Currently, the only accepted value is "AES256". Must be provided - if the x-ms-encryption-key header is provided. 
Possible values include: - 'AES256' - :type encryption_algorithm: str or - ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key'}}, - 'encryption_key_sha256': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_key_sha256'}}, - 'encryption_algorithm': {'key': '', 'type': 'EncryptionAlgorithmType', 'xml': {'name': 'encryption_algorithm'}}, - } - _xml_map = { - } - - def __init__(self, *, encryption_key: str=None, encryption_key_sha256: str=None, encryption_algorithm=None, **kwargs) -> None: - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = encryption_key - self.encryption_key_sha256 = encryption_key_sha256 - self.encryption_algorithm = encryption_algorithm - - -class CpkScopeInfo(Model): - """Additional parameters for a set of operations. - - :param encryption_scope: Optional. Version 2019-07-07 and later. - Specifies the name of the encryption scope to use to encrypt the data - provided in the request. If not specified, encryption is performed with - the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': '', 'type': 'str', 'xml': {'name': 'encryption_scope'}}, - } - _xml_map = { - } - - def __init__(self, *, encryption_scope: str=None, **kwargs) -> None: - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = encryption_scope - - -class DataLakeStorageError(Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: - ~azure.storage.blob.models.DataLakeStorageErrorError - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError', 'xml': {'name': 'error'}}, - } - _xml_map = { - } - - def __init__(self, *, data_lake_storage_error_details=None, **kwargs) -> None: - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = data_lake_storage_error_details - - -class DataLakeStorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'DataLakeStorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'DataLakeStorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(DataLakeStorageErrorException, self).__init__(response=response) - - -class DataLakeStorageErrorError(Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str', 'xml': {'name': 'Code'}}, - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None: - super(DataLakeStorageErrorError, self).__init__(**kwargs) - self.code = code - self.message = message - - -class DelimitedTextConfiguration(Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator - :type column_separator: str - :param field_quote: Required. 
field quote - :type field_quote: str - :param record_separator: Required. record separator - :type record_separator: str - :param escape_char: Required. escape char - :type escape_char: str - :param headers_present: Required. has headers - :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__(self, *, column_separator: str, field_quote: str, record_separator: str, escape_char: str, headers_present: bool, **kwargs) -> None: - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = column_separator - self.field_quote = field_quote - self.record_separator = record_separator - self.escape_char = escape_char - self.headers_present = headers_present - - -class DirectoryHttpHeaders(Model): - """Additional parameters for a set of operations, such as: Directory_create, - Directory_rename, Blob_rename. - - :param cache_control: Cache control for given resource - :type cache_control: str - :param content_type: Content type for given resource - :type content_type: str - :param content_encoding: Content encoding for given resource - :type content_encoding: str - :param content_language: Content language for given resource - :type content_language: str - :param content_disposition: Content disposition for given resource - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'cache_control'}}, - 'content_type': {'key': '', 'type': 'str', 'xml': {'name': 'content_type'}}, - 'content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'content_encoding'}}, - 'content_language': {'key': '', 'type': 'str', 'xml': {'name': 'content_language'}}, - 'content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, cache_control: str=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, **kwargs) -> None: - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - - -class FilterBlobItem(Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tag_value: Required. 
- :type tag_value: str - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - 'tag_value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}}, - 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__(self, *, name: str, container_name: str, tag_value: str, **kwargs) -> None: - super(FilterBlobItem, self).__init__(**kwargs) - self.name = name - self.container_name = container_name - self.tag_value = tag_value - - -class FilterBlobSegment(Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, where: str, blobs, next_marker: str=None, **kwargs) -> None: - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.where = where - self.blobs = blobs - self.next_marker = next_marker - - -class GeoReplication(Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible - values include: 'live', 'bootstrap', 'unavailable' - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All - primary writes preceding this value are guaranteed to be available for - read operations at the secondary. Primary writes after this point in time - may or may not be available for reads. - :type last_sync_time: datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, - } - _xml_map = { - } - - def __init__(self, *, status, last_sync_time, **kwargs) -> None: - super(GeoReplication, self).__init__(**kwargs) - self.status = status - self.last_sync_time = last_sync_time - - -class JsonTextConfiguration(Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. 
record separator - :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__(self, *, record_separator: str, **kwargs) -> None: - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = record_separator - - -class KeyInfo(Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC - time - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC - time - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str, expiry: str, **kwargs) -> None: - super(KeyInfo, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, *, lease_id: str=None, **kwargs) -> None: - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsFlatSegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None: - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListBlobsHierarchySegmentResponse(Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'delimiter': {'key': 'Delimiter', 'type': 'str', 'xml': {'name': 'Delimiter'}}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_name: str, segment, prefix: str=None, marker: str=None, max_results: int=None, delimiter: str=None, next_marker: str=None, **kwargs) -> None: - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ListContainersSegmentResponse(Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'itemsName': 'Containers', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, container_items, prefix: str=None, marker: str=None, max_results: int=None, next_marker: str=None, **kwargs) -> None: - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.container_items = container_items - self.next_marker = next_marker - - -class Logging(Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. 
Indicates whether all delete requests should be - logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be - logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be - logged. - :type write: bool - :param retention_policy: Required. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, - 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, - 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None: - super(Logging, self).__init__(**kwargs) - self.version = version - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy - - -class Metrics(Model): - """a summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None: - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class ModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param if_modified_since: Specify this header value to operate only on a - blob if it has been modified since the specified date/time. - :type if_modified_since: datetime - :param if_unmodified_since: Specify this header value to operate only on a - blob if it has not been modified since the specified date/time. - :type if_unmodified_since: datetime - :param if_match: Specify an ETag value to operate only on blobs with a - matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs - without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on - blobs with a matching value. 
- :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_modified_since'}}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'if_unmodified_since'}}, - 'if_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_match'}}, - 'if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'if_none_match'}}, - 'if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'if_tags'}}, - } - _xml_map = { - } - - def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, if_tags: str=None, **kwargs) -> None: - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - self.if_tags = if_tags - - -class PageList(Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]', 'xml': {'name': 'PageRange', 'itemsName': 'PageRange'}}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]', 'xml': {'name': 'ClearRange', 'itemsName': 'ClearRange'}}, - } - _xml_map = { - } - - def __init__(self, *, page_range=None, clear_range=None, **kwargs) -> None: - super(PageList, self).__init__(**kwargs) - self.page_range = page_range - self.clear_range = clear_range - - -class PageRange(Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(PageRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class QueryFormat(Model): - """QueryFormat. 
- - :param type: Possible values include: 'delimited', 'json', 'arrow' - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: - :type delimited_text_configuration: - ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: - :type json_text_configuration: - ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'QueryFormatType', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration', 'xml': {'name': 'ArrowConfiguration'}}, - } - _xml_map = { - } - - def __init__(self, *, type=None, delimited_text_configuration=None, json_text_configuration=None, arrow_configuration=None, **kwargs) -> None: - super(QueryFormat, self).__init__(**kwargs) - self.type = type - self.delimited_text_configuration = delimited_text_configuration - self.json_text_configuration = json_text_configuration - self.arrow_configuration = arrow_configuration - - -class QueryRequest(Model): - """the quick query body. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL" . - :vartype query_type: str - :param expression: Required. a query statement - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'InputSerialization'}}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization', 'xml': {'name': 'OutputSerialization'}}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__(self, *, expression: str, input_serialization=None, output_serialization=None, **kwargs) -> None: - super(QueryRequest, self).__init__(**kwargs) - self.expression = expression - self.input_serialization = input_serialization - self.output_serialization = output_serialization - - -class QuerySerialization(Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. 
- :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat', 'xml': {'name': 'Format'}}, - } - _xml_map = { - } - - def __init__(self, *, format, **kwargs) -> None: - super(QuerySerialization, self).__init__(**kwargs) - self.format = format - - -class RetentionPolicy(Model): - """the retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the storage service - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class SequenceNumberAccessConditions(Model): - """Additional parameters for a set of operations, such as: - PageBlob_upload_pages, PageBlob_clear_pages, - PageBlob_upload_pages_from_url. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value - to operate only on a blob if it has a sequence number less than or equal - to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate - only on a blob if it has a sequence number less than the specified. - :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate - only on a blob if it has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than_or_equal_to'}}, - 'if_sequence_number_less_than': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_less_than'}}, - 'if_sequence_number_equal_to': {'key': '', 'type': 'long', 'xml': {'name': 'if_sequence_number_equal_to'}}, - } - _xml_map = { - } - - def __init__(self, *, if_sequence_number_less_than_or_equal_to: int=None, if_sequence_number_less_than: int=None, if_sequence_number_equal_to: int=None, **kwargs) -> None: - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to - self.if_sequence_number_less_than = if_sequence_number_less_than - self.if_sequence_number_equal_to = if_sequence_number_equal_to - - -class SignedIdentifier(Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. 
a unique id - :type id: str - :param access_policy: - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param source_if_modified_since: Specify this header value to operate only - on a blob if it has been modified since the specified date/time. - :type source_if_modified_since: datetime - :param source_if_unmodified_since: Specify this header value to operate - only on a blob if it has not been modified since the specified date/time. - :type source_if_unmodified_since: datetime - :param source_if_match: Specify an ETag value to operate only on blobs - with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on - blobs without a matching value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate - only on blobs with a matching value. - :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_modified_since'}}, - 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123', 'xml': {'name': 'source_if_unmodified_since'}}, - 'source_if_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_match'}}, - 'source_if_none_match': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_none_match'}}, - 'source_if_tags': {'key': '', 'type': 'str', 'xml': {'name': 'source_if_tags'}}, - } - _xml_map = { - } - - def __init__(self, *, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match: str=None, source_if_none_match: str=None, source_if_tags: str=None, **kwargs) -> None: - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_tags = source_if_tags - - -class StaticWebsite(Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. 
Indicates whether this account is hosting a - static website - :type enabled: bool - :param index_document: The default name of the index page under each - directory - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index - page - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'index_document': {'key': 'IndexDocument', 'type': 'str', 'xml': {'name': 'IndexDocument'}}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str', 'xml': {'name': 'ErrorDocument404Path'}}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str', 'xml': {'name': 'DefaultIndexDocumentPath'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, index_document: str=None, error_document404_path: str=None, default_index_document_path: str=None, **kwargs) -> None: - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = enabled - self.index_document = index_document - self.error_document404_path = error_document404_path - self.default_index_document_path = default_index_document_path - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, *, message: str=None, **kwargs) -> None: - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage Service Properties. - - :param logging: - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to - the Blob service if an incoming request's version is not specified. 
- Possible values include version 2008-10-27 and all more recent versions - :type default_service_version: str - :param delete_retention_policy: - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str', 'xml': {'name': 'DefaultServiceVersion'}}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'DeleteRetentionPolicy'}}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite', 'xml': {'name': 'StaticWebsite'}}, - } - _xml_map = { - } - - def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, default_service_version: str=None, delete_retention_policy=None, static_website=None, **kwargs) -> None: - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = logging - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.default_service_version = default_service_version - self.delete_retention_policy = delete_retention_policy - self.static_website = static_website - - -class StorageServiceStats(Model): - """Stats for the storage service. - - :param geo_replication: - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, - } - _xml_map = { - } - - def __init__(self, *, geo_replication=None, **kwargs) -> None: - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = geo_replication - - -class UserDelegationKey(Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID - format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID - format - :type signed_tid: str - :param signed_start: Required. The date-time the key is active - :type signed_start: datetime - :param signed_expiry: Required. The date-time the key expires - :type signed_expiry: datetime - :param signed_service: Required. Abbreviation of the Azure Storage service - that accepts the key - :type signed_service: str - :param signed_version: Required. The service version that created the key - :type signed_version: str - :param value: Required. 
The key as a base64 string - :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str', 'xml': {'name': 'SignedOid'}}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str', 'xml': {'name': 'SignedTid'}}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601', 'xml': {'name': 'SignedStart'}}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601', 'xml': {'name': 'SignedExpiry'}}, - 'signed_service': {'key': 'SignedService', 'type': 'str', 'xml': {'name': 'SignedService'}}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str', 'xml': {'name': 'SignedVersion'}}, - 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, - } - _xml_map = { - } - - def __init__(self, *, signed_oid: str, signed_tid: str, signed_start, signed_expiry, signed_service: str, signed_version: str, value: str, **kwargs) -> None: - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = signed_oid - self.signed_tid = signed_tid - self.signed_start = signed_start - self.signed_expiry = signed_expiry - self.signed_service = signed_service - self.signed_version = signed_version - self.value = value diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/__init__.py deleted file mode 100644 index 1ea0453..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_append_blob_operations.py deleted file mode 100644 index 000810a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_append_blob_operations.py +++ /dev/null @@ -1,694 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class AppendBlobOperations(object): - """AppendBlobOperations operations. - - You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "AppendBlob" - - def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = 
self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - def append_block(self, body, content_length, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, lease_access_conditions=None, append_position_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append - Block is supported only on version 2015-02-21 version or later. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "appendblock" - - # Construct URL - url = self.append_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': 
self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block.metadata = {'url': '/{containerName}/{blob}'} - - def append_block_from_url(self, source_url, content_length, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, transactional_content_md5=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, append_position_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Append Block operation commits a new block of data to the end of an - existing append blob where the contents are read from a source url. The - Append Block operation is permitted only if the blob was created with - x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - max_size = None - if append_position_access_conditions is not None: - max_size = append_position_access_conditions.max_size - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "appendblock" - - # Construct URL - url = self.append_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = 
self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", max_size, 'long') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if 
source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-append-offset': self._deserialize('str', response.headers.get('x-ms-blob-append-offset')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def seal(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, append_position_access_conditions=None, cls=None, **kwargs): - """The Seal operation seals the Append Blob to make it read-only. Seal is - supported only on version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Additional parameters for - the operation - :type append_position_access_conditions: - ~azure.storage.blob.models.AppendPositionAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - append_position = None - if append_position_access_conditions is not None: - append_position = append_position_access_conditions.append_position - - comp = "seal" - - # Construct URL - url = self.seal.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) 
- raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - seal.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_blob_operations.py deleted file mode 100644 index 394a519..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_blob_operations.py +++ /dev/null @@ -1,3065 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class BlobOperations(object): - """BlobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_requires_sync: . Constant value: "true". - :ivar x_ms_copy_action: . Constant value: "abort". - :ivar restype: . Constant value: "account". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_requires_sync = "true" - self.x_ms_copy_action = "abort" - self.restype = "account" - - def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Download operation reads or downloads a blob from the system, - including its metadata and properties. You can also call Download to - read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. 
- :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param range_get_content_md5: When set to true and specified together - with the Range, the service returns the MD5 hash for the range, as - long as the range is less than or equal to 4 MB in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified - together with the Range, the service returns the CRC64 hash for the - range, as long as the range is less than or equal to 4 MB in size. - :type range_get_content_crc64: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 
'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', 
response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', 
response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} - - def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Properties operation returns all user-defined metadata, - standard HTTP properties, and system properties for the blob. 
It does - not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", 
request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), - 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-incremental-copy': self._deserialize('bool', response.headers.get('x-ms-incremental-copy')), - 'x-ms-copy-destination-snapshot': self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': 
self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-access-tier': self._deserialize('str', response.headers.get('x-ms-access-tier')), - 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), - 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), - 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')), - 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), - 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), - 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), - 'x-ms-rehydrate-priority': self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')), - 'x-ms-last-access-time': self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}/{blob}'} - - def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """If the storage account's soft delete feature is disabled then, when a - blob is deleted, it is permanently removed from the storage account. If - the storage account's soft delete feature is enabled, then, when a blob - is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for - the number of days specified by the DeleteRetentionPolicy section of - [Storage service properties] (Set-Blob-Service-Properties.md). 
After - the specified number of days has passed, the blob's data is permanently - removed from the storage account. Note that you continue to be charged - for the soft-deleted blob's storage until it is permanently removed. - Use the List Blobs API and specify the "include=deleted" query - parameter to discover which blobs and snapshots have been soft deleted. - You can then use the Undelete Blob API to restore a soft-deleted blob. - All other operations on a soft-deleted blob or snapshot causes the - service to return an HTTP status code of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param delete_snapshots: Required if the blob has associated - snapshots. Specify one of the following two options: include: Delete - the base blob and all of its snapshots. only: Delete only the blob's - snapshots and not the blob itself. Possible values include: 'include', - 'only' - :type delete_snapshots: str or - ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] 
= self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}/{blob}'} - - def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. 
The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - 
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
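For orientation, a minimal sketch of the same access-control calls made through the public azure-storage-file-datalake client; the client surface is assumed from the upstream v12 SDK rather than from this diff, and the account URL, credential, and path names are placeholders:

    from azure.storage.filedatalake import DataLakeServiceClient

    # Placeholders for account, credential, file system and path.
    service = DataLakeServiceClient("https://myaccount.dfs.core.windows.net", credential="<account-key>")
    file_client = service.get_file_system_client("myfs").get_file_client("raw/data.csv")

    # Symbolic POSIX permissions, as accepted by the x-ms-permissions header above.
    file_client.set_access_control(owner="<owner-object-id>", permissions="rwxr-x---")

    # upn=True asks the service to return User Principal Names instead of
    # Azure AD object IDs in the owner/group/acl fields (HNS accounts only).
    acl = file_client.get_access_control(upn=True)
    print(acl["owner"], acl["permissions"], acl["acl"])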
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': 
self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def rename(self, rename_source, timeout=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """Rename a blob/file. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. 
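A hedged sketch of the rename flow described above, again via the public azure-storage-file-datalake client (client names and the destination path are illustrative assumptions, not taken from this diff):

    from azure.storage.filedatalake import DataLakeServiceClient

    service = DataLakeServiceClient("https://myaccount.dfs.core.windows.net", credential="<account-key>")
    src = service.get_file_system_client("myfs").get_file_client("staging/report.csv")

    # The destination is given as "{filesystem}/{path}". By default an existing
    # destination is overwritten (and its lease broken), as noted above; failing
    # instead requires the service-level request to carry If-None-Match: "*".
    renamed = src.rename_file("myfs/published/report.csv")  # returns a client for the new path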
If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - def undelete(self, timeout=None, request_id=None, cls=None, **kwargs): - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.undelete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - undelete.metadata = {'url': '/{containerName}/{blob}'} - - def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, cls=None, **kwargs): - """Sets the time a 
blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', - 'RelativeToNow', 'Absolute' - :type expiry_options: str or - ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param expires_on: The time to set the blob to expiry - :type expires_on: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "expiry" - - # Construct URL - url = self.set_expiry.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_expiry.metadata = {'url': '/{containerName}/{blob}'} - - def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
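To make the expiry options above concrete, a small sketch that drives this generated operation directly; `blob_ops` stands in for an instantiated instance of this operations class, whose construction (pipeline, config) is outside this diff and is an assumption:

    import datetime

    # Assumption: `blob_ops` is an instance of this generated operations class.
    expires_at = datetime.datetime.utcnow() + datetime.timedelta(days=7)

    # expiry_options accepts 'NeverExpire', 'RelativeToCreation', 'RelativeToNow'
    # or 'Absolute'; expires_on is serialized as an RFC-1123 date (x-ms-expiry-time).
    blob_ops.set_expiry(expiry_options="Absolute", expires_on=expires_at)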
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", 
blob_content_type, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} - - def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set Blob Metadata operation sets user-defined metadata for the - specified blob as one or more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. 
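For the Set HTTP Headers operation just shown, a minimal sketch through the upstream azure-storage-blob v12 client that fronts this generated layer (account, container, and blob names are placeholders):

    from azure.storage.blob import BlobServiceClient, ContentSettings

    service = BlobServiceClient("https://myaccount.blob.core.windows.net", credential="<account-key>")
    blob = service.get_blob_client("mycontainer", "report.csv")

    # ContentSettings maps onto the x-ms-blob-content-* headers constructed above.
    blob.set_http_headers(content_settings=ContentSettings(
        content_type="text/csv",
        cache_control="max-age=3600",
        content_disposition='attachment; filename="report.csv"',
    ))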
If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - 
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}/{blob}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. 
A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # 
Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} - - def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
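A brief sketch of the lease lifecycle documented here, assuming the upstream azure-storage-blob v12 surface (all names are placeholders):

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient("https://myaccount.blob.core.windows.net", credential="<account-key>")
    blob = service.get_blob_client("mycontainer", "report.csv")

    # lease_duration must be 15-60 seconds, or -1 for an infinite lease,
    # matching the x-ms-lease-duration header above.
    lease = blob.acquire_lease(lease_duration=30)
    try:
        blob.set_blob_metadata({"stage": "processing"}, lease=lease)
    finally:
        lease.release()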
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}/{blob}'} - - def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", 
if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}/{blob}'} - - def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
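For the renew and change actions, a short hedged sketch continuing the same assumed v12 surface; the proposed lease ID is simply a freshly generated GUID:

    import uuid
    from azure.storage.blob import BlobClient, BlobLeaseClient

    blob = BlobClient("https://myaccount.blob.core.windows.net", "mycontainer", "report.csv",
                      credential="<account-key>")
    lease = BlobLeaseClient(blob, lease_id="<current-lease-id>")

    lease.renew()                                       # x-ms-lease-action: renew
    lease.change(proposed_lease_id=str(uuid.uuid4()))   # x-ms-lease-action: change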
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}/{blob}'} - - def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease Blob operation establishes and manages a lock on a - blob for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
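# Illustrative sketch (not part of the generated file): breaking a lease is the
# same PUT ?comp=lease request with x-ms-lease-action: break plus an optional
# x-ms-lease-break-period header, as constructed in the operation body below.
# URL, SAS token and service version are placeholder assumptions.
import requests

blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # placeholder
sas_token = "sv=2020-02-10&sig=REDACTED"  # placeholder

resp = requests.put(
    f"{blob_url}?{sas_token}",
    params={"comp": "lease"},
    headers={
        "x-ms-version": "2020-02-10",
        "x-ms-lease-action": "break",
        "x-ms-lease-break-period": "10",  # seconds; omit to break after the remaining lease time
    },
)
resp.raise_for_status()  # service answers 202 Accepted
print(resp.headers.get("x-ms-lease-time"))  # seconds until the break completes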
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': 
self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}/{blob}'} - - def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags 
= None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 
'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} - - def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """The Start Copy From URL operation copies a blob or an internet resource - to a new blob. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. - Service version 2019-12-12 and newer. 
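# Illustrative sketch (not part of the generated file): the Start Copy From URL
# operation documented here is surfaced by the public azure-storage-blob SDK
# (from which these files are vendored) as BlobClient.start_copy_from_url.
# The destination URL, SAS tokens and source URL are placeholder assumptions.
from azure.storage.blob import BlobClient

dest = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/dest-blob?sv=2020-02-10&sig=REDACTED"
)
source_url = "https://otheraccount.blob.core.windows.net/src/source-blob?sv=2020-02-10&sig=REDACTED"

copy = dest.start_copy_from_url(source_url, metadata={"origin": "example"})
print(copy["copy_id"], copy["copy_status"])  # e.g. '<guid>', 'pending'

# The copy proceeds asynchronously; poll its status through the blob properties.
props = dest.get_blob_properties()
print(props.copy.status)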
- :type seal_blob: bool - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - source_if_tags = None - if source_modified_access_conditions is not None: - source_if_tags = source_modified_access_conditions.source_if_tags - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if 
blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", source_if_tags, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, source_modified_access_conditions=None, modified_access_conditions=None, 
lease_access_conditions=None, cls=None, **kwargs): - """The Copy From URL operation copies a blob or an internet resource to a - new blob. It will not return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
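# Illustrative sketch (not part of the generated file): the synchronous copy is
# a plain PUT on the destination blob carrying x-ms-copy-source and
# x-ms-requires-sync: true, as the operation body below shows. URL, SAS tokens
# and service version are placeholder assumptions.
import requests

dest_url = "https://myaccount.blob.core.windows.net/mycontainer/dest-blob"  # placeholder
sas_token = "sv=2020-02-10&sig=REDACTED"  # placeholder
source_url = "https://otheraccount.blob.core.windows.net/src/source-blob?sv=2020-02-10&sig=REDACTED"

resp = requests.put(
    f"{dest_url}?{sas_token}",
    headers={
        "x-ms-version": "2020-02-10",
        "x-ms-copy-source": source_url,
        "x-ms-requires-sync": "true",
    },
)
resp.raise_for_status()  # 202 Accepted; x-ms-copy-status is 'success' once the copy is done
print(resp.headers.get("x-ms-copy-status"))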
- :type blob_tags_string: str - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - 
header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """The Abort Copy From URL operation aborts a pending Copy From URL - operation, and leaves a destination blob with zero length and full - metadata. 
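# Illustrative sketch (not part of the generated file): aborting a pending copy
# is a PUT with ?comp=copy&copyid=<id> and x-ms-copy-action: abort, mirroring
# the request built below. URL, SAS token, copy ID and service version are
# placeholder assumptions.
import requests

dest_url = "https://myaccount.blob.core.windows.net/mycontainer/dest-blob"  # placeholder
sas_token = "sv=2020-02-10&sig=REDACTED"  # placeholder
copy_id = "11111111-1111-1111-1111-111111111111"  # x-ms-copy-id returned by the original copy (placeholder)

resp = requests.put(
    f"{dest_url}?{sas_token}",
    params={"comp": "copy", "copyid": copy_id},
    headers={"x-ms-version": "2020-02-10", "x-ms-copy-action": "abort"},
)
resp.raise_for_status()  # 204 No Content when the abort is accepted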
- - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set Tier operation sets the tier on a blob. 
The operation is - allowed on a page blob in a premium storage account and on a block blob - in a blob storage account (locally redundant storage only). A premium - page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. Possible values - include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', - 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which - to rehydrate an archived blob. Possible values include: 'High', - 'Standard' - :type rehydrate_priority: str or - ~azure.storage.blob.models.RehydratePriority - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tier" - - # Construct URL - url = self.set_tier.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = 
self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tier.metadata = {'url': '/{containerName}/{blob}'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . - - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': 
'/{containerName}/{blob}'} - - def query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Query operation enables users to select/project on blob data by - providing simple query expressions. - - :param query_request: the query request - :type query_request: ~azure.storage.blob.models.QueryRequest - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "query" - - # Construct URL - url = self.query.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 
'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', 
response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', 
response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} - - def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Tags operation enables users to get the tags associated with a - blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. 
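# Illustrative sketch (not part of the generated file): the Query operation
# defined above is exposed by the azure-storage-blob SDK as
# BlobClient.query_blob, which streams back the filtered content. The URL,
# embedded SAS credential and query text are placeholder assumptions.
from azure.storage.blob import BlobClient

blob = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/data.csv?sv=2020-02-10&sig=REDACTED"
)
reader = blob.query_blob("SELECT * FROM BlobStorage")  # richer projections/filters are possible
print(reader.readall().decode("utf-8"))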
- :type version_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlobTags or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tags" - - # Construct URL - url = self.get_tags.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlobTags', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} - - def set_tags(self, timeout=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, modified_access_conditions=None, cls=None, **kwargs): - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to operate - on. It's for service version 2019-10-10 and newer. 
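# Illustrative sketch (not part of the generated file): the Get Tags operation
# above and the Set Tags operation documented here correspond to
# BlobClient.get_blob_tags and set_blob_tags in the azure-storage-blob SDK.
# The URL and embedded SAS credential are placeholder assumptions.
from azure.storage.blob import BlobClient

blob = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/myblob?sv=2020-02-10&sig=REDACTED"
)
blob.set_blob_tags({"project": "alpha", "tier": "gold"})
print(blob.get_blob_tags())  # {'project': 'alpha', 'tier': 'gold'}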
- :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param tags: Blob tags - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "tags" - - # Construct URL - url = self.set_tags.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags') - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', 
response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_tags.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_block_blob_operations.py deleted file mode 100644 index 8228c47..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_block_blob_operations.py +++ /dev/null @@ -1,833 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class BlockBlobOperations(object): - """BlockBlobOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "BlockBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "BlockBlob" - - def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Upload Block Blob operation updates the content of an existing - block blob. Updating an existing block blob overwrites any existing - metadata on the blob. Partial updates are not supported with Put Blob; - the content of the existing blob is overwritten with the content of the - new blob. To perform a partial update of the content of a block blob, - use the Put Block List operation. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. 
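For orientation, the blob tag operations whose generated plumbing ends above (get_tags / set_tags) are normally reached through the public BlobClient rather than this class. A minimal sketch, assuming the retained vendored packages mirror azure-storage-blob 12.x; the import path, container/blob names, and connection string below are illustrative only:

    # Illustrative only: set and read blob index tags (service version 2019-12-12 or newer).
    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient  # path assumed from this repo's layout

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")
    blob_client.set_blob_tags({"project": "demo", "status": "draft"})
    tags = blob_client.get_blob_tags()  # returns a dict of the current tags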
Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. - :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = 
modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.upload.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", 
if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload.metadata = {'url': '/{containerName}/{blob}'} - - def stage_block(self, block_id, content_length, body, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data - :type body: Generator - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
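The Upload (Put Blob) operation above maps to upload_blob on the public client surface. A hedged sketch under the same assumption about the vendored import path; file name and metadata are made up:

    # Illustrative only: single-shot Put Blob upload; overwrites the existing blob and its metadata.
    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient  # path assumed

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")
    with open("report.csv", "rb") as data:
        blob_client.upload_blob(data, overwrite=True, metadata={"origin": "upload-sample"})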
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - - comp = "block" - - # Construct URL - url = self.stage_block.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = 
self._serialize.header("encryption_scope", encryption_scope, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block.metadata = {'url': '/{containerName}/{blob}'} - - def stage_block_from_url(self, block_id, content_length, source_url, source_range=None, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Stage Block operation creates a new block to be committed as part - of a blob where the contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "block" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. 
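Stage Block From URL lets the service read the block contents from another URL instead of the request body. A sketch with an assumed SAS source URL and illustrative byte range:

    # Illustrative only: stage a block copied server-side from a source URL.
    import uuid
    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient  # path assumed

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="copy-target")
    dest.stage_block_from_url(
        block_id=str(uuid.uuid4()),
        source_url="https://<account>.blob.core.windows.net/src/source-blob?<sas-token>",
        source_offset=0, source_length=4 * 1024 * 1024)  # optional byte range of the source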
In order to be written as part of a - blob, a block must have been successfully written to the server in a - prior Put Block operation. You can call Put Block List to update a blob - by uploading only those blocks that have changed, then committing the - new and existing blocks together. You can do this by specifying whether - to commit a block from the committed block list or from the uncommitted - block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "blocklist" - - # Construct URL - url = self.commit_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; 
charset=utf-8' - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - body_content = self._serialize.body(blocks, 'BlockLookupList') - - # Construct and send request - request = 
self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} - - def get_block_list(self, list_type="committed", snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param list_type: Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
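Commit Block List is what turns staged blocks into the blob's readable content, in the order given. A minimal end-to-end sketch (assumed import path, made-up data):

    # Illustrative only: stage two blocks, then commit them as the blob.
    import uuid
    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient, BlobBlock  # path assumed

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="bigblob")
    ids = [str(uuid.uuid4()) for _ in range(2)]
    blob_client.stage_block(ids[0], b"part one ")
    blob_client.stage_block(ids[1], b"part two")
    blob_client.commit_block_list([BlobBlock(block_id=i) for i in ids])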
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: BlockList or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "blocklist" - - # Construct URL - url = self.get_block_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'BlockListType') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('BlockList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, 
deserialized, header_dict) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_container_operations.py deleted file mode 100644 index 5730483..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_container_operations.py +++ /dev/null @@ -1,1400 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ContainerOperations(object): - """ContainerOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - def create(self, timeout=None, metadata=None, access=None, request_id=None, container_cpk_scope_info=None, cls=None, **kwargs): - """creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
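Get Block List is the read side of the same workflow, returning the committed and uncommitted block sets. A sketch, with the usual caveat that the vendored import path is assumed:

    # Illustrative only: inspect committed vs. uncommitted blocks of a block blob.
    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient  # path assumed

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="bigblob")
    committed, uncommitted = blob_client.get_block_list("all")
    print(len(committed), "committed blocks;", len(uncommitted), "uncommitted blocks")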
- :type request_id: str - :param container_cpk_scope_info: Additional parameters for the - operation - :type container_cpk_scope_info: - ~azure.storage.blob.models.ContainerCpkScopeInfo - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - default_encryption_scope = None - if container_cpk_scope_info is not None: - default_encryption_scope = container_cpk_scope_info.default_encryption_scope - prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - - restype = "container" - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", default_encryption_scope, 'str') - if prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", prevent_encryption_scope_override, 'bool') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}'} - - def get_properties(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """returns all user-defined metadata and system properties for the - 
specified container. The data returned does not include the container's - list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'x-ms-has-immutability-policy': self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')), - 'x-ms-has-legal-hold': self._deserialize('bool', 
response.headers.get('x-ms-has-legal-hold')), - 'x-ms-default-encryption-scope': self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')), - 'x-ms-deny-encryption-scope-override': self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{containerName}'} - - def delete(self, timeout=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """operation marks the specified container for deletion. The container and - any blobs contained within it are later deleted during garbage - collection. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - 
response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{containerName}'} - - def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """operation sets one or more user-defined name-value pairs for the - specified container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - - restype = "container" - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{containerName}'} - - def get_access_policy(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """gets the permissions for the specified container. The permissions - indicate whether container data may be accessed publicly. 
- - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - restype = "container" - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'x-ms-blob-public-access': self._deserialize('str', response.headers.get('x-ms-blob-public-access')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} - - def set_access_policy(self, container_acl=None, timeout=None, access=None, request_id=None, lease_access_conditions=None, 
modified_access_conditions=None, cls=None, **kwargs): - """sets the permissions for the specified container. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param container_acl: the acls for the container - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param access: Specifies whether data in the container may be accessed - publicly and the level of access. Possible values include: - 'container', 'blob' - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - restype = "container" - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifiers', 'wrapped': True}} - if 
container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{containerName}'} - - def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, cls=None, **kwargs): - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param deleted_container_name: Optional. Version 2019-12-12 and - later. Specifies the name of the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and - later. Specifies the version of the deleted container to restore. 
- :type deleted_container_version: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "undelete" - - # Construct URL - url = self.restore.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - restore.metadata = {'url': '/{containerName}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{containerName}'} - - def release_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - 
response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{containerName}'} - - def renew_lease(self, lease_id, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 
'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{containerName}'} - - def break_lease(self, timeout=None, break_period=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{containerName}'} - - def change_lease(self, lease_id, proposed_lease_id, timeout=None, request_id=None, modified_access_conditions=None, 
cls=None, **kwargs): - """[Update] establishes and manages a lock on a container for delete - operations. The lock duration can be 15 to 60 seconds, or can be - infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - comp = "lease" - restype = "container" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{containerName}'} - - def list_blob_flat_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsFlatSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} - - def list_blob_hierarchy_segment(self, delimiter, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """[Update] The List Blobs operation returns a list of the blobs under the - specified container. 
- - :param delimiter: When the request includes this parameter, the - operation returns a BlobPrefix element in the response body that acts - as a placeholder for all blobs whose names begin with the same - substring up to the appearance of the delimiter character. The - delimiter may be a single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListBlobsHierarchySegmentResponse or the result of - cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "container" - comp = "list" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListBlobsIncludeItem]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . 
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/{containerName}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_directory_operations.py deleted file mode 100644 index c2bf317..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,739 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar resource: . Constant value: "directory". 
- """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "directory" - - def create(self, timeout=None, directory_properties=None, posix_permissions=None, posix_umask=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Create a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - - # Construct headers - header_parameters = {} - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: 
- header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - def rename(self, rename_source, timeout=None, marker=None, path_rename_mode=None, directory_properties=None, posix_permissions=None, posix_umask=None, source_lease_id=None, request_id=None, directory_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """Rename a directory. By default, the destination is overwritten and if - the destination already exists and has a lease the lease is broken. - This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value - must have the following format: "/{filesysystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. 
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename - operation. Possible values include: 'legacy', 'posix' - :type path_rename_mode: str or - ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be - stored with the file or directory, in the format of a comma-separated - list of name and value pairs "n1=v1, n2=v2, ...", where each value is - base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled - for the account. This umask restricts permission settings for file and - directory, and will only be applied when default Acl does not exist in - parent directory. If the umask bit has set, it means that the - corresponding permission will be disabled. Otherwise the corresponding - permission will be determined by the permission. A 4-digit octal - notation (e.g. 0022) is supported here. If no umask was specified, a - default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param directory_http_headers: Additional parameters for the operation - :type directory_http_headers: - ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if directory_http_headers is not None: - cache_control = directory_http_headers.cache_control - content_type = None - if directory_http_headers is not None: - content_type = directory_http_headers.content_type - content_encoding = None - if directory_http_headers is not None: - content_encoding = directory_http_headers.content_encoding - content_language = None - if directory_http_headers is not None: - content_language = directory_http_headers.content_language - content_disposition = None - if directory_http_headers is not None: - content_disposition = directory_http_headers.content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - # Construct URL - url = self.rename.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'PathRenameMode') - - # Construct 
headers - header_parameters = {} - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 
'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - rename.metadata = {'url': '/{filesystem}/{path}'} - - def delete(self, recursive_directory_delete, timeout=None, marker=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the - directory will be deleted. If "false" and the directory is non-empty, - an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are - renamed with each invocation is limited. If the number of paths to be - renamed exceeds this limit, a continuation token is returned in this - response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the - rename operation to continue renaming the directory. - :type marker: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
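
rename(), implemented above, sends the source path in x-ms-rename-source using the "/{filesystem}/{path}" form and resumes via the continuation query parameter when a rename needs multiple invocations. The public directory client wraps this; a sketch under the same filedatalake assumptions, with continuation handling left to the client::

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeServiceClient

    service = DataLakeServiceClient("https://myaccount.dfs.core.windows.net", credential="<account-key>")
    directory = service.get_directory_client("myfilesystem", "raw/2024")

    # new_name uses the "{filesystem}/{path}" form expected by x-ms-rename-source.
    renamed = directory.rename_directory(new_name="myfilesystem/curated/2024")
    print(renamed.url)
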
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': 
self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - def set_access_control(self, timeout=None, owner=None, group=None, posix_permissions=None, posix_acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical - Namespace is enabled for the account. Sets POSIX access permissions - for the file owner, the file owning group, and others. Each class may - be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. - 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and - directories. The value is a comma-separated list of access control - entries. Each access control entry (ACE) consists of a scope, a type, - a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
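
delete() above maps recursive_directory_delete to the recursive query parameter and, like rename, can return an x-ms-continuation token for large directories. A sketch of the public-client equivalent, under the same assumptions::

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeServiceClient

    service = DataLakeServiceClient("https://myaccount.dfs.core.windows.net", credential="<account-key>")
    directory = service.get_directory_client("myfilesystem", "raw/2024")

    # Removes the directory tree; continuation across requests, the equivalent
    # of the marker parameter above, is handled inside the client.
    directory.delete_directory()
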
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def get_access_control(self, timeout=None, upn=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Get the owner, group, permissions, or access control list for a - directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the identity values returned in - the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`DataLakeStorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "getAccessControl" - - # Construct URL - url = self.get_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - 
query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.DataLakeStorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - } - return cls(response, None, response_headers) - get_access_control.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_page_blob_operations.py deleted file mode 100644 index fedc96c..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_page_blob_operations.py +++ /dev/null @@ -1,1399 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class PageBlobOperations(object): - """PageBlobOperations operations. 
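
set_access_control and get_access_control above round-trip the x-ms-owner, x-ms-group, x-ms-permissions, and x-ms-acl headers via PATCH and HEAD requests with action=setAccessControl / getAccessControl. The public path clients expose the same pair; a sketch under the same filedatalake assumptions, with placeholder object IDs::

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeServiceClient

    service = DataLakeServiceClient("https://myaccount.dfs.core.windows.net", credential="<account-key>")
    directory = service.get_directory_client("myfilesystem", "raw/2024")

    # x-ms-permissions and x-ms-acl are mutually exclusive, so set one or the other.
    directory.set_access_control(owner="<owner-object-id>", permissions="rwxr-x---")

    acl = directory.get_access_control(upn=True)  # translate object IDs to principal names
    print(acl["owner"], acl["group"], acl["permissions"], acl["acl"])
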
- - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "PageBlob". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_blob_type = "PageBlob" - - def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - Possible values include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', - 'P40', 'P50', 'P60', 'P70', 'P80' - :type tier: str or - ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair - associated with the blob. If no name-value pairs are specified, the - operation will copy the metadata from the source blob or file to the - destination blob. If one or more name-value pairs are specified, the - destination blob is created with the specified metadata, and metadata - is not copied from the source blob or file. Note that beginning with - version 2009-09-19, metadata names must adhere to the naming rules for - C# identifiers. See Naming and Referencing Containers, Blobs, and - Metadata for more information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param blob_tags_string: Optional. Used to set blob tags in various - blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Additional parameters for the operation - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - blob_content_type = None - if blob_http_headers is not None: - blob_content_type = blob_http_headers.blob_content_type - blob_content_encoding = None - if blob_http_headers is not None: - blob_content_encoding = blob_http_headers.blob_content_encoding - blob_content_language = None - if blob_http_headers is not None: - blob_content_language = blob_http_headers.blob_content_language - blob_content_md5 = None - if blob_http_headers is not None: - blob_content_md5 = blob_http_headers.blob_content_md5 - blob_cache_control = None - if blob_http_headers is not None: - blob_cache_control = blob_http_headers.blob_cache_control - blob_content_disposition = None - if blob_http_headers is not None: - blob_content_disposition = blob_http_headers.blob_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] 
= self._serialize.header("tier", tier, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') - if blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') - if blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", blob_content_encoding, 'str') - if blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", blob_content_language, 'str') - if blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", blob_content_md5, 'bytearray') - if blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') - if blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", blob_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{containerName}/{blob}'} - - def upload_pages(self, body, content_length, transactional_content_md5=None, transactional_content_crc64=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param transactional_content_md5: Specify the transactional md5 for - the body, to be validated by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
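
PageBlobOperations.create above is the PUT that sets x-ms-blob-type: PageBlob and sizes the blob with x-ms-blob-content-length. The public call is BlobClient.create_page_blob; a sketch, again assuming the vendored blob v2019_07_07 package mirrors azure-storage-blob, with placeholder names::

    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient

    service = BlobServiceClient("https://myaccount.blob.core.windows.net", credential="<account-key>")
    blob = service.get_blob_client("mycontainer", "disk.vhd")

    # The page blob size must be aligned to a 512-byte boundary.
    blob.create_page_blob(size=4 * 1024 * 1024)
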
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if 
transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': 
self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages.metadata = {'url': '/{containerName}/{blob}'} - - def clear_pages(self, content_length, timeout=None, range=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, sequence_number_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
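
upload_pages and clear_pages above are the comp=page requests distinguished by x-ms-page-write: update and x-ms-page-write: clear, and both operate on 512-byte-aligned ranges. The public BlobClient exposes them as upload_page and clear_page; a sketch under the same assumptions::

    import os

    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient

    service = BlobServiceClient("https://myaccount.blob.core.windows.net", credential="<account-key>")
    blob = service.get_blob_client("mycontainer", "disk.vhd")

    data = os.urandom(512)                        # pages are written in 512-byte units
    blob.upload_page(data, offset=0, length=512)  # x-ms-page-write: update
    blob.clear_page(offset=0, length=512)         # x-ms-page-write: clear
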
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "page" - page_write = "clear" - - # Construct URL - url = self.clear_pages.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = 
self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 
'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - clear_pages.metadata = {'url': '/{containerName}/{blob}'} - - def upload_pages_from_url(self, source_url, source_range, content_length, range, source_content_md5=None, source_contentcrc64=None, timeout=None, request_id=None, cpk_info=None, cpk_scope_info=None, lease_access_conditions=None, sequence_number_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The - length of this range should match the ContentLength header and - x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be - written. The range should be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range - of bytes that must be read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
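# Illustrative sketch (not from the deleted file): upload_pages_from_url writes
# a 512-aligned destination range whose contents the service reads from a
# source URL. azure-storage-blob client shown; account, SAS and connection
# string values are placeholders.
from azure.storage.blob import BlobClient

dest = BlobClient.from_connection_string(
    "<connection-string>", container_name="pages", blob_name="copy.vhd")
dest.create_page_blob(size=1024)
dest.upload_pages_from_url(
    source_url="https://<account>.blob.core.windows.net/pages/disk.vhd?<sas>",
    offset=0,         # destination range start (x-ms-range)
    length=512,       # must match the source range length
    source_offset=0)  # source range start (x-ms-source-range)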
- :type request_id: str - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Additional parameters for - the operation - :type sequence_number_access_conditions: - ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.blob.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_sequence_number_less_than_or_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - if_sequence_number_less_than = None - if sequence_number_access_conditions is not None: - if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - if_sequence_number_equal_to = None - if sequence_number_access_conditions is not None: - if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is 
not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - - comp = "page" - page_write = "update" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') - if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, 'long') - if if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", if_sequence_number_less_than, 'long') - if if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", if_sequence_number_equal_to, 'long') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if 
if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), - 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} - - def get_page_ranges(self, snapshot=None, timeout=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Page Ranges operation returns the list of valid page ranges for - a page blob or snapshot of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param range: Return only the bytes of the blob in the specified - range. 
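# Illustrative sketch (not from the deleted file): the public client wraps
# get_page_ranges and returns the written and cleared ranges as two lists of
# {'start': ..., 'end': ...} dicts. Connection string is a placeholder.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="pages", blob_name="disk.vhd")
page_ranges, clear_ranges = blob.get_page_ranges()
for r in page_ranges:
    print("written bytes", r["start"], "-", r["end"])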
- :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} - - def get_page_ranges_diff(self, snapshot=None, timeout=None, prevsnapshot=None, prev_snapshot_url=None, range=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """The Get Page Ranges Diff operation returns the list of valid page - ranges for a page blob that were changed between target blob and - previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value - that, when present, specifies the blob snapshot to retrieve. For more - information on working with blob snapshots, see Creating - a Snapshot of a Blob. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The - prevsnapshot parameter is a DateTime value that specifies that the - response will contain only pages that were changed between target blob - and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot - specified by prevsnapshot is the older of the two. Note that - incremental snapshots are currently supported only for blobs created - on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in - service versions 2019-04-19 and after and specifies the URL of a - previous snapshot of the target blob. The response will only contain - pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified - range. - :type range: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
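# Illustrative sketch (not from the deleted file): the diff variant is exposed
# on the public client through the previous_snapshot_diff argument of
# get_page_ranges, which lists only ranges changed since the given snapshot.
# Assumes the snapshot id returned by create_snapshot() under its 'snapshot'
# key; placeholders as above.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="pages", blob_name="disk.vhd")
baseline = blob.create_snapshot()
blob.upload_page(b"\x01" * 512, offset=0, length=512)
changed, cleared = blob.get_page_ranges(previous_snapshot_diff=baseline["snapshot"])
print(changed)  # only the ranges written since the baseline snapshot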
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: PageList or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "pagelist" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and 
send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PageList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-blob-content-length': self._deserialize('long', response.headers.get('x-ms-blob-content-length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} - - def resize(self, blob_content_length, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for - the page blob, up to 1 TB. The page blob size must be aligned to a - 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
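# Illustrative sketch (not from the deleted file): resize maps to the public
# client's resize_blob; the new size must remain 512-byte aligned. Connection
# string is a placeholder.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="pages", blob_name="disk.vhd")
blob.resize_blob(size=2048)  # grow (or truncate) the page blob to 2 KiB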
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Additional parameters for the operation - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Additional parameters for the operation - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - encryption_key = None - if cpk_info is not None: - encryption_key = cpk_info.encryption_key - encryption_key_sha256 = None - if cpk_info is not None: - encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = None - if cpk_info is not None: - encryption_algorithm = cpk_info.encryption_algorithm - encryption_scope = None - if cpk_scope_info is not None: - encryption_scope = cpk_scope_info.encryption_scope - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.resize.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') 
- if encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", encryption_scope, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - resize.metadata = {'url': '/{containerName}/{blob}'} - - def update_sequence_number(self, sequence_number_action, timeout=None, blob_sequence_number=0, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the - x-ms-blob-sequence-number header is set for the request. This property - applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. Possible values include: - 'max', 'update', 'increment' - :type sequence_number_action: str or - ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence - number is a user-controlled value that you can use to track requests. - The value of the sequence number must be between 0 and 2^63 - 1. - :type blob_sequence_number: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
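# Illustrative sketch (not from the deleted file): update_sequence_number
# corresponds to set_sequence_number on the public client; the action is one
# of "max", "update" or "increment" ("increment" takes no explicit number).
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="pages", blob_name="disk.vhd")
blob.set_sequence_number("update", 7)  # x-ms-sequence-number-action: update
blob.set_sequence_number("increment")  # service increments the current value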
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "properties" - - # Construct URL - url = self.update_sequence_number.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'SequenceNumberActionType') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} - - def copy_incremental(self, copy_source, timeout=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): - """The Copy Incremental operation copies a snapshot of the source page - blob to a destination page blob. The snapshot is copied such that only - the differential changes between the previously copied snapshot are - transferred to the destination. The copied snapshots are complete - copies of the original snapshot and can be read or copied from as - usual. This API is supported since REST version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob - snapshot. This value is a URL of up to 2 KB in length that specifies a - page blob snapshot. The value should be URL-encoded as it would appear - in a request URI. The source blob must either be public or must be - authenticated via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
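# Illustrative sketch (not from the deleted file): incremental copy is reached
# through start_copy_from_url(..., incremental_copy=True); the source must be a
# readable page-blob snapshot URL (public or carrying a SAS). All URL and
# credential values are placeholders.
from azure.storage.blob import BlobClient

dest = BlobClient.from_connection_string(
    "<connection-string>", container_name="backups", blob_name="disk-incremental")
source_snapshot_url = (
    "https://<account>.blob.core.windows.net/pages/disk.vhd"
    "?snapshot=<datetime>&<sas>")
copy = dest.start_copy_from_url(source_snapshot_url, incremental_copy=True)
print(copy["copy_id"], copy["copy_status"])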
- :type request_id: str - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.blob.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - comp = "incrementalcopy" - - # Construct URL - url = self.copy_incremental.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", if_tags, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 
'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_service_operations.py deleted file mode 100644 index 0a49915..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/operations/_service_operations.py +++ /dev/null @@ -1,663 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - def set_properties(self, storage_service_properties, timeout=None, request_id=None, cls=None, **kwargs): - """Sets properties for a storage account's Blob service endpoint, - including properties for Storage Analytics and CORS (Cross-Origin - Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): - """gets the properties of a storage account's Blob service, including - properties for Storage Analytics and CORS (Cross-Origin Resource - Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
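# Illustrative sketch (not from the deleted file): the service-level
# set_properties/get_properties operations surface as set_service_properties
# and get_service_properties on BlobServiceClient; a minimal round trip that
# enables a 7-day delete retention policy. Connection string is a placeholder.
from azure.storage.blob import BlobServiceClient, RetentionPolicy

service = BlobServiceClient.from_connection_string("<connection-string>")
service.set_service_properties(
    delete_retention_policy=RetentionPolicy(enabled=True, days=7))
props = service.get_service_properties()  # dict of the current service settings
print(props["delete_retention_policy"])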
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs): - """Retrieves statistics related to replication for the Blob service. It is - only available on the secondary location endpoint when read-access - geo-redundant replication is enabled for the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
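# Illustrative sketch (not from the deleted file): get_statistics surfaces as
# get_service_stats and only succeeds against the secondary endpoint of an
# RA-GRS account. Connection string is a placeholder.
from azure.storage.blob import BlobServiceClient

secondary = BlobServiceClient.from_connection_string("<connection-string>")
stats = secondary.get_service_stats()
print(stats["geo_replication"])  # replication status and last sync time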
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceStats or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceStats', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/'} - - def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """The List Containers Segment operation returns a list of the containers - under the specified account. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. 
Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's - metadata be returned as part of the response body. - :type include: list[str or - ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListContainersSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_containers_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListContainersSegmentResponse', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return 
cls(response, deserialized, header_dict) - - return deserialized - list_containers_segment.metadata = {'url': '/'} - - def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=None, **kwargs): - """Retrieves a user delegation key for the Blob service. This is only a - valid operation when using bearer token authentication. - - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: UserDelegationKey or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "userdelegationkey" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(key_info, 'KeyInfo') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('UserDelegationKey', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . 
- - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/'} - - def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, cls=None, **kwargs): - """The Batch operation allows multiple API calls to be embedded into a - single HTTP request. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must - be multipart/mixed with a batch boundary. Example header value: - multipart/mixed; boundary=batch_ - :type multipart_content_type: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "batch" - - # Construct URL - url = self.submit_batch.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - submit_batch.metadata = {'url': '/'} - - def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, cls=None, **kwargs): - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param where: Filters the results to return only blobs - whose tags match the specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. 
The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: FilterBlobSegment or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "blobs" - - # Construct URL - url = self.filter_blobs.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FilterBlobSegment', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - filter_blobs.metadata = {'url': '/'} diff --git 
a/azure/multiapi/storagev2/blob/v2020_02_10/_lease.py b/azure/multiapi/storagev2/blob/v2020_02_10/_lease.py deleted file mode 100644 index 1fd668c..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_lease.py +++ /dev/null @@ -1,331 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated.models import StorageErrorException -from ._serialize import get_modify_conditions - -if TYPE_CHECKING: - from datetime import datetime - - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(object): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.BlobClient or - ~azure.storage.blob.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'blob_name'): - self._client = client._client.blob # type: ignore # pylint: disable=protected-access - elif hasattr(client, 'container_name'): - self._client = client._client.container # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use either BlobClient or ContainerClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
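# A minimal, hedged usage sketch for BlobLeaseClient via the upstream
# azure.storage.blob package (which this module vendors); the connection
# string, container and blob names below are hypothetical placeholders.
from azure.storage.blob import BlobClient, BlobLeaseClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="data.txt")

lease = BlobLeaseClient(blob)
lease.acquire(lease_duration=15)   # 15-second lease; the client generates the lease ID
try:
    # While the lease is held, write operations must present the lease ID.
    blob.set_blob_metadata({"state": "processing"}, lease=lease)
    lease.renew()                  # resets the 15-second duration clock
finally:
    lease.release()                # lets another client acquire the lease immediately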
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
"\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_02_10/_list_blobs_helper.py deleted file mode 100644 index f1dd70f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_list_blobs_helper.py +++ /dev/null @@ -1,166 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.paging import PageIterator, ItemPaged -from ._deserialize import get_blob_properties_from_generated_code -from ._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix -from ._models import BlobProperties -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - def _extract_data_cb(self, get_next_return): - continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class BlobPrefix(ItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str next_marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. 
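# A hedged sketch of the hierarchical listing that yields BlobPrefix items,
# using ContainerClient.walk_blobs() from the upstream azure.storage.blob
# package; the connection string and container name are placeholders.
from azure.storage.blob import BlobPrefix, ContainerClient

container = ContainerClient.from_connection_string(
    "<connection-string>", container_name="mycontainer")

def walk(prefix=""):
    # With a delimiter, walk_blobs() yields BlobProperties for blobs and
    # BlobPrefix "virtual directories" for each common prefix.
    for item in container.walk_blobs(name_starts_with=prefix, delimiter="/"):
        if isinstance(item, BlobPrefix):
            print("dir: ", item.name)
            walk(item.name)        # descend into the virtual directory
        else:
            print("blob:", item.name)

walk()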
- :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_models.py b/azure/multiapi/storagev2/blob/v2020_02_10/_models.py deleted file mode 100644 index a7658cc..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_models.py +++ /dev/null @@ -1,1173 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from ._generated.models import FilterBlobItem, ArrowField - -from ._shared import decode_base64_to_text -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Logging as GeneratedLogging -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import StaticWebsite as GeneratedStaticWebsite -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import StorageErrorException - - -class BlobType(str, Enum): - - BlockBlob = "BlockBlob" - PageBlob = "PageBlob" - AppendBlob = "AppendBlob" - - -class BlockState(str, Enum): - """Block blob block types.""" - - Committed = 'Committed' #: Committed blocks. - Latest = 'Latest' #: Latest blocks. - Uncommitted = 'Uncommitted' #: Uncommitted blocks. - - -class StandardBlobTier(str, Enum): - """ - Specifies the blob tier to set the blob to. This is only applicable for - block blobs on standard storage accounts. - """ - - Archive = 'Archive' #: Archive - Cool = 'Cool' #: Cool - Hot = 'Hot' #: Hot - - -class PremiumPageBlobTier(str, Enum): - """ - Specifies the page blob tier to set the blob to. This is only applicable to page - blobs on premium storage accounts. 
Please take a look at: - https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughput per PageBlobTier. - """ - - P4 = 'P4' #: P4 Tier - P6 = 'P6' #: P6 Tier - P10 = 'P10' #: P10 Tier - P20 = 'P20' #: P20 Tier - P30 = 'P30' #: P30 Tier - P40 = 'P40' #: P40 Tier - P50 = 'P50' #: P50 Tier - P60 = 'P60' #: P60 Tier - - -class SequenceNumberAction(str, Enum): - """Sequence number actions.""" - - Increment = 'increment' - """ - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - """ - - Max = 'max' - """ - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - """ - - Update = 'update' - """Sets the sequence number to the value included with the request.""" - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the container may be accessed publicly and the level of access. - """ - - OFF = 'off' - """ - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - """ - - Blob = 'blob' - """ - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - """ - - Container = 'container' - """ - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - """ - - -class BlobAnalyticsLogging(GeneratedLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Blob service. - The default value is `False`. 
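# A hedged sketch of how these analytics models are combined when configuring
# the service through BlobServiceClient.set_service_properties() in the
# upstream azure.storage.blob package; the account URL and key are placeholders.
from azure.storage.blob import (
    BlobAnalyticsLogging, BlobServiceClient, Metrics, RetentionPolicy)

service = BlobServiceClient(
    account_url="https://<account>.blob.core.windows.net", credential="<account-key>")

retention = RetentionPolicy(enabled=True, days=7)   # keep logs/metrics for 7 days
service.set_service_properties(
    analytics_logging=BlobAnalyticsLogging(
        read=True, write=True, delete=True, retention_policy=retention),
    minute_metrics=Metrics(
        enabled=True, include_apis=True, retention_policy=retention))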
- :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GeneratedStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. - """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. 
The list must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ContainerProperties(DictMixin): - """Blob container's properties class. - - Returned ``ContainerProperties`` instances expose these values through a - dictionary interface, for example: ``container_props["last_modified"]``. - Additionally, the container name is available as ``container_props["name"]``. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the container. - :ivar str public_access: Specifies whether data in the container may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the container has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the container has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - container as metadata. - :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope: - The default encryption scope configuration for the container. 
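# A hedged sketch of the attribute/dictionary dual access that DictMixin
# provides on ContainerProperties; the connection string and container name
# are placeholders.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string(
    "<connection-string>", container_name="mycontainer")

props = container.get_container_properties()
print(props.last_modified)        # attribute access
print(props["last_modified"])     # equivalent dict-style access via DictMixin
print(props["name"], props.public_access)
if props.encryption_scope:
    print(props.encryption_scope.default_encryption_scope)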
- """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.lease = LeaseProperties(**kwargs) - self.public_access = kwargs.get('x-ms-blob-public-access') - self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') - self.deleted = None - self.version = None - self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') - self.metadata = kwargs.get('metadata') - self.encryption_scope = None - default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') - if default_encryption_scope: - self.encryption_scope = ContainerEncryptionScope( - default_encryption_scope=default_encryption_scope, - prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) - ) - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = generated.properties.public_access - props.has_immutability_policy = generated.properties.has_immutability_policy - props.deleted = generated.deleted - props.version = generated.version - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access - return props - - -class ContainerPropertiesPaged(PageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
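# A hedged sketch of how ContainerPropertiesPaged is consumed through
# BlobServiceClient.list_containers() in the upstream azure.storage.blob
# package; the connection string and name prefix are placeholders.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")

# The iterator follows continuation markers across pages transparently.
for container in service.list_containers(name_starts_with="dev-", include_metadata=True):
    print(container.name, container.last_modified, container.metadata)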
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class BlobProperties(DictMixin): - """ - Blob Properties. - - :ivar str name: - The name of the blob. - :ivar str container: - The container in which the blob resides. - :ivar str snapshot: - Datetime value that uniquely identifies the blob snapshot. - :ivar ~azure.blob.storage.BlobType blob_type: - String indicating this blob's type. - :ivar dict metadata: - Name-value pairs associated with the blob as metadata. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - The size of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar bool is_append_blob_sealed: - Indicate if the append blob is sealed or not. - - .. versionadded:: 12.4.0 - - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar ~azure.storage.blob.StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. 
- :ivar str rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :ivar ~datetime.datetime blob_tier_change_time: - Indicates when the access tier was last changed. - :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar bool deleted: - Whether this blob was deleted. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - :ivar ~datetime.datetime creation_time: - Indicates when the blob was created, in UTC. - :ivar str archive_status: - Archive status of blob. - :ivar str encryption_key_sha256: - The SHA-256 hash of the provided encryption key. - :ivar str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :ivar bool request_server_encrypted: - Whether this blob is encrypted. - :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: - Only present for blobs that have policy ids and rule ids applied to them. - - .. versionadded:: 12.4.0 - - :ivar str object_replication_destination_policy: - Represents the Object Replication Policy Id that created this blob. - - .. versionadded:: 12.4.0 - - :ivar ~datetime.datetime last_accessed_on: - Indicates when the last Read/Write operation was performed on a Blob. - - .. versionadded:: 12.6.0 - - :ivar int tag_count: - Tags count on this blob. - - .. versionadded:: 12.4.0 - - :ivar dict(str, str) tags: - Key value pair of tags on this blob. - - .. 
versionadded:: 12.4.0 - - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.container = None - self.snapshot = kwargs.get('x-ms-snapshot') - self.version_id = kwargs.get('x-ms-version-id') - self.is_current_version = kwargs.get('x-ms-is-current-version') - self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None - self.metadata = kwargs.get('metadata') - self.encrypted_metadata = kwargs.get('encrypted_metadata') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') - self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') - self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.blob_tier = kwargs.get('x-ms-access-tier') - self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') - self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') - self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') - self.deleted = False - self.deleted_time = None - self.remaining_retention_days = None - self.creation_time = kwargs.get('x-ms-creation-time') - self.archive_status = kwargs.get('x-ms-archive-status') - self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') - self.encryption_scope = kwargs.get('x-ms-encryption-scope') - self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') - self.object_replication_source_properties = kwargs.get('object_replication_source_properties') - self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') - self.last_accessed_on = kwargs.get('x-ms-last-access-time') - self.tag_count = kwargs.get('x-ms-tag-count') - self.tags = None - - -class FilteredBlob(DictMixin): - """Blob info from a Filter Blobs API call. - - :ivar name: Blob name - :type name: str - :ivar container_name: Container name. - :type container_name: str - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name', None) - self.container_name = kwargs.get('container_name', None) - - -class FilteredBlobPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.FilteredBlob) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. 
- This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - blob = FilteredBlob(name=item.name, container_name=item.container_name) # pylint: disable=protected-access - return blob - return item - - -class LeaseProperties(DictMixin): - """Blob Lease Properties. - - :ivar str status: - The lease status of the blob. Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """The content settings of a blob. - - :param str content_type: - The content type specified for the blob. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :param str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :param str content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. 
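# Illustrative sketch: ContentSettings (documented above) is a plain property bag;
# a typical use is passing it to an upload so the blob is later served with the
# desired Content-Type and Cache-Control headers. The upload call itself belongs to
# the client layer and is assumed here.
settings = ContentSettings(
    content_type='application/json',
    content_language='en-US',
    cache_control='max-age=3600',
)
assert settings.content_type == 'application/json'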
- """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class CopyProperties(DictMixin): - """Blob Copy Properties. - - These properties will be `None` if this blob has never been the destination - in a Copy Blob operation, or if this blob has been modified after a concluded - Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar ~datetime.datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar ~datetime.datetime destination_snapshot: - Included if the blob is incremental copy blob or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this blob. 
- """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class BlobBlock(DictMixin): - """BlockBlob Block class. - - :param str block_id: - Block id. - :param str state: - Block state. Possible values: committed|uncommitted - :ivar int size: - Block size in bytes. - """ - - def __init__(self, block_id, state=BlockState.Latest): - self.id = block_id - self.state = state - self.size = None - - @classmethod - def _from_generated(cls, generated): - block = cls(decode_base64_to_text(generated.name)) - block.size = generated.size - return block - - -class PageRange(DictMixin): - """Page Range for page blob. - - :param int start: - Start of page range in bytes. - :param int end: - End of page range in bytes. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class ContainerSasPermissions(object): - """ContainerSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_container_sas` function and - for the AccessPolicies used with - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - List blobs in the container. - :param bool tag: - Set or get tags on the blobs in the container. - """ - def __init__(self, read=False, write=False, delete=False, list=False, delete_previous_version=False, tag=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self.delete_previous_version = delete_previous_version - self.tag = tag - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('t' if self.tag else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ContainerSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, delete, - and list permissions. 
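# Quick round-trip for the permission helpers described above: the flags serialize
# to the single-letter string used in a SAS token, and from_string parses the same
# letters back.
perms = ContainerSasPermissions(read=True, write=True, list=True)
assert str(perms) == 'rwl'
parsed = ContainerSasPermissions.from_string('rwl')
assert parsed.read and parsed.write and parsed.list and not parsed.delete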
- :return: A ContainerSasPermissions object - :rtype: ~azure.storage.blob.ContainerSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, - delete_previous_version=p_delete_previous_version, tag=p_tag) - - return parsed - - -class BlobSasPermissions(object): - """BlobSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_blob_sas` function. - - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool tag: - Set or get tags on the blob. - """ - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, delete_previous_version=False, tag=True): - self.read = read - self.add = add - self.create = create - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.tag = tag - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('t' if self.tag else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a BlobSasPermissions from a string. - - To specify read, add, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. - :return: A BlobSasPermissions object - :rtype: ~azure.storage.blob.BlobSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - - parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, - delete_previous_version=p_delete_previous_version, tag=p_tag) - - return parsed - - -class CustomerProvidedEncryptionKey(object): - """ - All data in Azure Storage is encrypted at-rest using an account-level encryption key. - In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents - and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. - - When you use a customer-provided key, Azure Storage does not manage or persist your key. - When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. - A SHA-256 hash of the encryption key is written alongside the blob contents, - and is used to verify that all subsequent operations against the blob use the same encryption key. 
- This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. - When reading a blob, the provided key is used to decrypt your data after reading it from disk. - In both cases, the provided encryption key is securely discarded - as soon as the encryption or decryption process completes. - - :param str key_value: - Base64-encoded AES-256 encryption key value. - :param str key_hash: - Base64-encoded SHA256 of the encryption key. - :ivar str algorithm: - Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - """ - def __init__(self, key_value, key_hash): - self.key_value = key_value - self.key_hash = key_hash - self.algorithm = 'AES256' - - -class ContainerEncryptionScope(object): - """The default encryption scope configuration for a container. - - This scope is used implicitly for all future writes within the container, - but can be overridden per blob operation. - - .. versionadded:: 12.2.0 - - :param str default_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - :param bool prevent_encryption_scope_override: - If true, prevents any request from specifying a different encryption scope than the scope - set on the container. Default value is false. - """ - - def __init__(self, default_encryption_scope, **kwargs): - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) - - @classmethod - def _from_generated(cls, generated): - if generated.properties.default_encryption_scope: - scope = cls( - generated.properties.default_encryption_scope, - prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False - ) - return scope - return None - - -class DelimitedJsonDialect(object): - """Defines the input or output JSON serialization for a blob data query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', '\n') - - -class DelimitedTextDialect(object): - """Defines the input or output delimited (CSV) serialization for a blob query request. - - :keyword str delimiter: - Column separator, defaults to ','. - :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', ',') - self.quotechar = kwargs.pop('quotechar', '"') - self.lineterminator = kwargs.pop('lineterminator', '\n') - self.escapechar = kwargs.pop('escapechar', "") - self.has_header = kwargs.pop('has_header', False) - - -class ArrowDialect(ArrowField): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param ~azure.storage.blob.ArrowType type: Arrow field type. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. 
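# Hedged sketch for CustomerProvidedEncryptionKey (defined earlier in this file):
# generate a random AES-256 key, then supply the base64 key plus the base64-encoded
# SHA-256 digest the constructor expects. Passing the object to a blob operation
# (e.g. via a `cpk` argument) is the assumed consumption pattern and is not shown.
import base64
import hashlib
import os

raw_key = os.urandom(32)  # 256-bit key; never persisted by the service
cpk = CustomerProvidedEncryptionKey(
    key_value=base64.b64encode(raw_key).decode('utf-8'),
    key_hash=base64.b64encode(hashlib.sha256(raw_key).digest()).decode('utf-8'),
)
assert cpk.algorithm == 'AES256'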
- """ - def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin - super(ArrowDialect, self).__init__(type=type, **kwargs) - - -class ArrowType(str, Enum): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class ObjectReplicationPolicy(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str policy_id: - Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. - :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: - Within each policy there may be multiple replication rules. - e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 - """ - - def __init__(self, **kwargs): - self.policy_id = kwargs.pop('policy_id', None) - self.rules = kwargs.pop('rules', None) - - -class ObjectReplicationRule(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str rule_id: - Rule id. - :ivar str status: - The status of the rule. It could be "Complete" or "Failed" - """ - - def __init__(self, **kwargs): - self.rule_id = kwargs.pop('rule_id', None) - self.status = kwargs.pop('status', None) - - -class BlobQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2020_02_10/_quick_query_helper.py deleted file mode 100644 index eb51d98..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_quick_query_helper.py +++ /dev/null @@ -1,196 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from io import BytesIO -from typing import Union, Iterable, IO # pylint: disable=unused-import - -from ._shared.avro.datafile import DataFileReader -from ._shared.avro.avro_io import DatumReader - - -class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. 
- """ - - def __init__( - self, - name=None, - container=None, - errors=None, - record_delimiter='\n', - encoding=None, - headers=None, - response=None, - error_cls=None, - ): - self.name = name - self.container = container - self.response_headers = headers - self.record_delimiter = record_delimiter - self._size = 0 - self._bytes_processed = 0 - self._errors = errors - self._encoding = encoding - self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) - self._first_result = self._process_record(next(self._parsed_results)) - self._error_cls = error_cls - - def __len__(self): - return self._size - - def _process_record(self, result): - self._size = result.get('totalBytes', self._size) - self._bytes_processed = result.get('bytesScanned', self._bytes_processed) - if 'data' in result: - return result.get('data') - if 'fatal' in result: - error = self._error_cls( - error=result['name'], - is_fatal=result['fatal'], - description=result['description'], - position=result['position'] - ) - if self._errors: - self._errors(error) - return None - - def _iter_stream(self): - if self._first_result is not None: - yield self._first_result - for next_result in self._parsed_results: - processed_result = self._process_record(next_result) - if processed_result is not None: - yield processed_result - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - for record in self._iter_stream(): - stream.write(record) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - delimiter = self.record_delimiter.encode('utf-8') - for record_chunk in self._iter_stream(): - for record in record_chunk.split(delimiter): - if self._encoding: - yield record.decode(self._encoding) - else: - yield record - - - -class QuickQueryStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator): - self.generator = generator - self.iterator = iter(generator) - self._buf = b"" - self._point = 0 - self._download_offset = 0 - self._buf_start = 0 - self.file_length = None - - def __len__(self): - return self.file_length - - def __iter__(self): - return self.iterator - - @staticmethod - def seekable(): - return True - - def __next__(self): - next_part = next(self.iterator) - self._download_offset += len(next_part) - return next_part - - next = __next__ # Python 2 compatibility. - - def tell(self): - return self._point - - def seek(self, offset, whence=0): - if whence == 0: - self._point = offset - elif whence == 1: - self._point += offset - else: - raise ValueError("whence must be 0, or 1") - if self._point < 0: - self._point = 0 # XXX is this right? 
- - def read(self, size): - try: - # keep reading from the generator until the buffer of this stream has enough data to read - while self._point + size > self._download_offset: - self._buf += self.__next__() - except StopIteration: - self.file_length = self._download_offset - - start_point = self._point - - # EOF - self._point = min(self._point + size, self._download_offset) - - relative_start = start_point - self._buf_start - if relative_start < 0: - raise ValueError("Buffer has dumped too much data") - relative_end = relative_start + size - data = self._buf[relative_start: relative_end] - - # dump the extra data in buffer - # buffer start--------------------16bytes----current read position - dumped_size = max(relative_end - 16 - relative_start, 0) - self._buf_start += dumped_size - self._buf = self._buf[dumped_size:] - - return data diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_serialize.py b/azure/multiapi/storagev2/blob/v2020_02_10/_serialize.py deleted file mode 100644 index a4b13da..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_serialize.py +++ /dev/null @@ -1,196 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -try: - from urllib.parse import quote -except ImportError: - from urllib2 import quote # type: ignore - -from azure.core import MatchConditions - -from ._models import ( - ContainerEncryptionScope, - DelimitedJsonDialect) -from ._generated.models import ( - ModifiedAccessConditions, - SourceModifiedAccessConditions, - CpkScopeInfo, - ContainerCpkScopeInfo, - QueryFormat, - QuerySerialization, - DelimitedTextConfiguration, - JsonTextConfiguration, - ArrowConfiguration, - QueryFormatType, - BlobTag, - BlobTags, LeaseAccessConditions -) - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if kwargs.get(etag_param): - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_modify_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - 
if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None), - if_tags=kwargs.pop('if_tags_match_condition', None) - ) - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), - source_if_tags=kwargs.pop('source_if_tags_match_condition', None) - ) - - -def get_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> CpkScopeInfo - if 'encryption_scope' in kwargs: - return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) - return None - - -def get_container_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> ContainerCpkScopeInfo - encryption_scope = kwargs.pop('container_encryption_scope', None) - if encryption_scope: - if isinstance(encryption_scope, ContainerEncryptionScope): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope.default_encryption_scope, - prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override - ) - if isinstance(encryption_scope, dict): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope['default_encryption_scope'], - prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') - ) - raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") - return None - - -def get_api_version(kwargs, default): - # type: (Dict[str, Any]) -> str - api_version = kwargs.pop('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) - return api_version or default - - -def serialize_blob_tags_header(tags=None): - # type: (Optional[Dict[str, str]]) -> str - if tags is None: - return None - - components = list() - if tags: - for key, value in tags.items(): - components.append(quote(key, safe='.-')) - components.append('=') - components.append(quote(value, safe='.-')) - components.append('&') - - if components: - del components[-1] - - return ''.join(components) - - -def serialize_blob_tags(tags=None): - # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] - tag_list = list() - if tags: - tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] - return BlobTags(blob_tag_set=tag_list) - - -def serialize_query_format(formater): - if isinstance(formater, DelimitedJsonDialect): - serialization_settings = JsonTextConfiguration( - record_separator=formater.delimiter - ) - qq_format = QueryFormat( - type=QueryFormatType.json, - json_text_configuration=serialization_settings) - elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well - try: - headers = formater.has_header - except AttributeError: - headers = False - serialization_settings = DelimitedTextConfiguration( - column_separator=formater.delimiter, - field_quote=formater.quotechar, - record_separator=formater.lineterminator, - escape_char=formater.escapechar, - headers_present=headers - ) - qq_format = QueryFormat( - type=QueryFormatType.delimited, - delimited_text_configuration=serialization_settings - ) - elif isinstance(formater, list): - serialization_settings = ArrowConfiguration( - schema=formater - ) - qq_format = QueryFormat( - type=QueryFormatType.arrow, - arrow_configuration=serialization_settings) - elif not formater: - return None - else: - raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect.") - return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
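# Small self-contained sketch of the signing helper above: sign_string HMAC-SHA256
# signs a string-to-sign with a base64-encoded account key and returns a base64
# signature. The key and string below are invented; the real string-to-sign is
# assembled by SharedKeyCredentialPolicy further down in authentication.py.
import base64
import os

fake_account_key = base64.b64encode(os.urandom(64)).decode('utf-8')
string_to_sign = 'GET\n\n\nx-ms-date:Mon, 26 Oct 2020 08:00:00 GMT\n/myaccount/mycontainer'
signature = sign_string(fake_account_key, string_to_sign)
print('SharedKey myaccount:' + signature)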
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/__init__.py deleted file mode 100644 index 5b396cd..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io.py deleted file mode 100644 index 93a5c13..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io.py +++ /dev/null @@ -1,464 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import json -import logging -import struct -import sys - -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -STRUCT_FLOAT = struct.Struct('= 0), n - input_bytes = self.reader.read(n) - if n > 0 and not input_bytes: - raise StopIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return self.read_long() - - def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - b = ord(self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(self.read(4))[0] - - def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(self.read(8))[0] - - def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = self.read_long() - assert (nbytes >= 0), nbytes - return self.read(nbytes) - - def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. 
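# Worked example of the variable-length zig-zag coding that read_int/read_long above
# decode. This standalone helper mirrors the loop in BinaryDecoder.read_long but
# reads from a BytesIO instead of the wrapped stream.
import io

def decode_zigzag_long(stream):
    b = ord(stream.read(1))
    n, shift = b & 0x7F, 7
    while (b & 0x80) != 0:
        b = ord(stream.read(1))
        n |= (b & 0x7F) << shift
        shift += 7
    return (n >> 1) ^ -(n & 1)

# -1 encodes to 0x01, 1 to 0x02, and 64 to 0x80 0x01 in zig-zag varint form.
assert decode_zigzag_long(io.BytesIO(b'\x01')) == -1
assert decode_zigzag_long(io.BytesIO(b'\x02')) == 1
assert decode_zigzag_long(io.BytesIO(b'\x80\x01')) == 64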
- """ - input_bytes = self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - def skip_boolean(self): - self.skip(1) - - def skip_int(self): - self.skip_long() - - def skip_long(self): - b = ord(self.read(1)) - while (b & 0x80) != 0: - b = ord(self.read(1)) - - def skip_float(self): - self.skip(4) - - def skip_double(self): - self.skip(8) - - def skip_bytes(self): - self.skip(self.read_long()) - - def skip_utf8(self): - self.skip_bytes() - - def skip(self, n): - self.reader.seek(self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class DatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema". - """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - def read(self, decoder): - return self.read_data(self.writer_schema, decoder) - - def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = decoder.read_boolean() - elif writer_schema.type == 'string': - result = decoder.read_utf8() - elif writer_schema.type == 'int': - result = decoder.read_int() - elif writer_schema.type == 'long': - result = decoder.read_long() - elif writer_schema.type == 'float': - result = decoder.read_float() - elif writer_schema.type == 'double': - result = decoder.read_double() - elif writer_schema.type == 'bytes': - result = decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = decoder.skip_boolean() - elif writer_schema.type == 'string': - result = decoder.skip_utf8() - elif writer_schema.type == 'int': - result = decoder.skip_int() - elif writer_schema.type == 'long': - result = decoder.skip_long() - elif writer_schema.type == 'float': - result = decoder.skip_float() - elif writer_schema.type == 'double': - result = decoder.skip_double() - elif writer_schema.type == 'bytes': - result = decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - 
result = self.skip_enum(decoder) - elif writer_schema.type == 'array': - self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. - """ - return decoder.read(writer_schema.size) - - @staticmethod - def skip_fixed(writer_schema, decoder): - return decoder.skip(writer_schema.size) - - @staticmethod - def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - def skip_enum(decoder): - return decoder.skip_int() - - def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - read_items.append(self.read_data(writer_schema.items, decoder)) - block_count = decoder.read_long() - return read_items - - def skip_array(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - self.skip_data(writer_schema.items, decoder) - block_count = decoder.read_long() - - def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. 
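# The block encoding described in the read_array/read_map docstrings above can be
# exercised with a tiny self-contained sketch. This is illustrative only and not part
# of the deleted module; it re-implements the same zig-zag varint and block rules that
# BinaryDecoder.read_long and DatumReader.read_array use.
import io

def _read_zigzag_long(buf):
    # Variable-length zig-zag long, as in BinaryDecoder.read_long above.
    b = buf.read(1)[0]
    n = b & 0x7F
    shift = 7
    while b & 0x80:
        b = buf.read(1)[0]
        n |= (b & 0x7F) << shift
        shift += 7
    return (n >> 1) ^ -(n & 1)

def read_long_array(buf):
    # Arrays are a series of blocks: an item count, then that many items; count 0 ends.
    # A negative count is followed by the block size in bytes (only needed for skipping).
    items = []
    count = _read_zigzag_long(buf)
    while count != 0:
        if count < 0:
            count = -count
            _read_zigzag_long(buf)  # block size; ignored when reading every item
        for _ in range(count):
            items.append(_read_zigzag_long(buf))
        count = _read_zigzag_long(buf)
    return items

# One block holding [1, 2]: count 2 -> 0x04, item 1 -> 0x02, item 2 -> 0x04, terminator 0x00.
assert read_long_array(io.BytesIO(b'\x04\x02\x04\x00')) == [1, 2]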
- """ - read_items = {} - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - key = decoder.read_utf8() - read_items[key] = self.read_data(writer_schema.values, decoder) - block_count = decoder.read_long() - return read_items - - def skip_map(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - decoder.skip_utf8() - self.skip_data(writer_schema.values, decoder) - block_count = decoder.read_long() - - def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. - """ - # schema resolution - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return self.read_data(selected_writer_schema, decoder) - - def skip_union(self, writer_schema, decoder): - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. 
- """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io_async.py deleted file mode 100644 index e981216..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/avro_io_async.py +++ /dev/null @@ -1,448 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import logging -import sys - -from ..avro import schema - -from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Decoder - - -class AsyncBinaryDecoder(object): - """Read leaf values.""" - - def __init__(self, reader): - """ - reader is a Python object on which we can call read, seek, and tell. - """ - self._reader = reader - - @property - def reader(self): - """Reports the reader used by this decoder.""" - return self._reader - - async def read(self, n): - """Read n bytes. - - Args: - n: Number of bytes to read. - Returns: - The next n bytes from the input. - """ - assert (n >= 0), n - input_bytes = await self.reader.read(n) - if n > 0 and not input_bytes: - raise StopAsyncIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - async def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(await self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - async def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return await self.read_long() - - async def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. 
- """ - b = ord(await self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(await self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - async def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(await self.read(4))[0] - - async def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(await self.read(8))[0] - - async def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = await self.read_long() - assert (nbytes >= 0), nbytes - return await self.read(nbytes) - - async def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. - """ - input_bytes = await self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - async def skip_boolean(self): - await self.skip(1) - - async def skip_int(self): - await self.skip_long() - - async def skip_long(self): - b = ord(await self.read(1)) - while (b & 0x80) != 0: - b = ord(await self.read(1)) - - async def skip_float(self): - await self.skip(4) - - async def skip_double(self): - await self.skip(8) - - async def skip_bytes(self): - await self.skip(await self.read_long()) - - async def skip_utf8(self): - await self.skip_bytes() - - async def skip(self, n): - await self.reader.seek(await self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class AsyncDatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema", and the schema expected by the - reader the "reader's schema". 
- """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - async def read(self, decoder): - return await self.read_data(self.writer_schema, decoder) - - async def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = await decoder.read_boolean() - elif writer_schema.type == 'string': - result = await decoder.read_utf8() - elif writer_schema.type == 'int': - result = await decoder.read_int() - elif writer_schema.type == 'long': - result = await decoder.read_long() - elif writer_schema.type == 'float': - result = await decoder.read_float() - elif writer_schema.type == 'double': - result = await decoder.read_double() - elif writer_schema.type == 'bytes': - result = await decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = await self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = await self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = await self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = await self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = await self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - async def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = await decoder.skip_boolean() - elif writer_schema.type == 'string': - result = await decoder.skip_utf8() - elif writer_schema.type == 'int': - result = await decoder.skip_int() - elif writer_schema.type == 'long': - result = await decoder.skip_long() - elif writer_schema.type == 'float': - result = await decoder.skip_float() - elif writer_schema.type == 'double': - result = await decoder.skip_double() - elif writer_schema.type == 'bytes': - result = await decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = await self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.skip_enum(decoder) - elif writer_schema.type == 'array': - await self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - await self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = await self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - await self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - async def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. 
- """ - return await decoder.read(writer_schema.size) - - @staticmethod - async def skip_fixed(writer_schema, decoder): - return await decoder.skip(writer_schema.size) - - @staticmethod - async def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = await decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - async def skip_enum(decoder): - return await decoder.skip_int() - - async def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - read_items.append(await self.read_data(writer_schema.items, decoder)) - block_count = await decoder.read_long() - return read_items - - async def skip_array(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await self.skip_data(writer_schema.items, decoder) - block_count = await decoder.read_long() - - async def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = {} - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - key = await decoder.read_utf8() - read_items[key] = await self.read_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - return read_items - - async def skip_map(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await decoder.skip_utf8() - await self.skip_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - - async def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. 
- """ - # schema resolution - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return await self.read_data(selected_writer_schema, decoder) - - async def skip_union(self, writer_schema, decoder): - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - async def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. - """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = await self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - async def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - await self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile.py deleted file mode 100644 index df06fe0..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile.py +++ /dev/null @@ -1,266 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import io -import logging -import sys -import zlib - -from ..avro import avro_io -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Version of the container file: -VERSION = 1 - -if PY3: - MAGIC = b'Obj' + bytes([VERSION]) - MAGIC_SIZE = len(MAGIC) -else: - MAGIC = 'Obj' + chr(VERSION) - MAGIC_SIZE = len(MAGIC) - -# Size of the synchronization marker, in number of bytes: -SYNC_SIZE = 16 - -# Schema of the container header: -META_SCHEMA = schema.parse(""" -{ - "type": "record", "name": "org.apache.avro.file.Header", - "fields": [{ - "name": "magic", - "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} - }, { - "name": "meta", - "type": {"type": "map", "values": "bytes"} - }, { - "name": "sync", - "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} - }] -} -""" % { - 'magic_size': MAGIC_SIZE, - 'sync_size': SYNC_SIZE, -}) - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null', 'deflate']) - -# Metadata key associated to the schema: -SCHEMA_KEY = "avro.schema" - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class DataFileException(schema.AvroException): - """Problem reading or writing file object containers.""" - -# ------------------------------------------------------------------------------ - - -class DataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io.BinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - self._reader.seek(0, 0) - - # read the header: magic, meta, sync - self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' % self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - self._cur_object_index = 0 - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. 
- if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - - def __enter__(self): - return self - - def __exit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __iter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - header_reader.seek(0, 0) - - # read header into a dict - header = self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - def _read_block_header(self): - self._block_count = self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - elif self.codec == 'deflate': - # Compressed data is stored as (length, data), which - # corresponds to how the "bytes" type is encoded. - data = self.raw_decoder.read_bytes() - # -15 is the log of the window size; negative indicates - # "raw" (no zlib headers) decompression. See zlib.h. - uncompressed = zlib.decompress(data, -15) - self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopIteration - if proposed_sync_marker != self.sync_marker: - self.reader.seek(-SYNC_SIZE, 1) - - def __next__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. 
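# The 'deflate' branch of _read_block_header above inflates each block with
# zlib.decompress(data, -15): Avro stores raw DEFLATE data with no zlib header or
# checksum, which is what the negative window size selects. A small round trip of that
# exact call (illustrative; the payload is arbitrary):
import zlib

payload = b'example avro block payload'
compressor = zlib.compressobj(9, zlib.DEFLATED, -15)   # -15 -> raw deflate stream
raw_block = compressor.compress(payload) + compressor.flush()
assert zlib.decompress(raw_block, -15) == payload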
- if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - self._cur_object_index = 0 - - self._read_block_header() - - datum = self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - self.reader.track_object_position() - self.reader.set_object_index(0) - else: - self.reader.set_object_index(self._cur_object_index) - - return datum - - # PY2 - def next(self): - return self.__next__() - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile_async.py deleted file mode 100644 index 1e9d018..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/datafile_async.py +++ /dev/null @@ -1,215 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import logging -import sys - -from ..avro import avro_io_async -from ..avro import schema -from .datafile import DataFileException -from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY - - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null']) - - -class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else \ - avro_io_async.AsyncBinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - self.codec = "null" - self._block_count = 0 - self._cur_object_index = 0 - self._meta = None - self._sync_marker = None - - async def init(self): - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - await self._reader.seek(0, 0) - - # read the header: magic, meta, sync - await self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' 
% self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. - if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - return self - - async def __aenter__(self): - return self - - async def __aexit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __aiter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - async def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - await header_reader.seek(0, 0) - - # read header into a dict - header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - async def _read_block_header(self): - self._block_count = await self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - await self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - async def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = await self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopAsyncIteration - if proposed_sync_marker != self.sync_marker: - await self.reader.seek(-SYNC_SIZE, 1) - - async def __anext__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - await self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. 
- if hasattr(self._reader, 'object_position'): - await self.reader.track_object_position() - self._cur_object_index = 0 - - await self._read_block_header() - - datum = await self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - await self.reader.track_object_position() - await self.reader.set_object_index(0) - else: - await self.reader.set_object_index(self._cur_object_index) - - return datum - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/schema.py deleted file mode 100644 index ffe2853..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/avro/schema.py +++ /dev/null @@ -1,1221 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines - -"""Representation of Avro schemas. - -A schema may be one of: - - A record, mapping field names to field value data; - - An error, equivalent to a record; - - An enum, containing one of a small set of symbols; - - An array of values, all of the same schema; - - A map containing string/value pairs, each of a declared schema; - - A union of other schemas; - - A fixed sized binary object; - - A unicode string; - - A sequence of bytes; - - A 32-bit signed int; - - A 64-bit signed long; - - A 32-bit floating-point float; - - A 64-bit floating-point double; - - A boolean; - - Null. -""" - -import abc -import json -import logging -import re -import sys -from six import with_metaclass - -PY2 = sys.version_info[0] == 2 - -if PY2: - _str = unicode # pylint: disable=undefined-variable -else: - _str = str - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Log level more verbose than DEBUG=10, INFO=20, etc. 
-DEBUG_VERBOSE = 5 - -NULL = 'null' -BOOLEAN = 'boolean' -STRING = 'string' -BYTES = 'bytes' -INT = 'int' -LONG = 'long' -FLOAT = 'float' -DOUBLE = 'double' -FIXED = 'fixed' -ENUM = 'enum' -RECORD = 'record' -ERROR = 'error' -ARRAY = 'array' -MAP = 'map' -UNION = 'union' - -# Request and error unions are part of Avro protocols: -REQUEST = 'request' -ERROR_UNION = 'error_union' - -PRIMITIVE_TYPES = frozenset([ - NULL, - BOOLEAN, - STRING, - BYTES, - INT, - LONG, - FLOAT, - DOUBLE, -]) - -NAMED_TYPES = frozenset([ - FIXED, - ENUM, - RECORD, - ERROR, -]) - -VALID_TYPES = frozenset.union( - PRIMITIVE_TYPES, - NAMED_TYPES, - [ - ARRAY, - MAP, - UNION, - REQUEST, - ERROR_UNION, - ], -) - -SCHEMA_RESERVED_PROPS = frozenset([ - 'type', - 'name', - 'namespace', - 'fields', # Record - 'items', # Array - 'size', # Fixed - 'symbols', # Enum - 'values', # Map - 'doc', -]) - -FIELD_RESERVED_PROPS = frozenset([ - 'default', - 'name', - 'doc', - 'order', - 'type', -]) - -VALID_FIELD_SORT_ORDERS = frozenset([ - 'ascending', - 'descending', - 'ignore', -]) - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class Error(Exception): - """Base class for errors in this module.""" - - -class AvroException(Error): - """Generic Avro schema error.""" - - -class SchemaParseException(AvroException): - """Error while parsing a JSON schema descriptor.""" - - -class Schema(with_metaclass(abc.ABCMeta, object)): - """Abstract base class for all Schema classes.""" - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object. - - Args: - data_type: Type of the schema to initialize. - other_props: Optional dictionary of additional properties. - """ - if data_type not in VALID_TYPES: - raise SchemaParseException('%r is not a valid Avro type.' % data_type) - - # All properties of this schema, as a map: property name -> property value - self._props = {} - - self._props['type'] = data_type - self._type = data_type - - if other_props: - self._props.update(other_props) - - @property - def namespace(self): - """Returns: the namespace this schema belongs to, if any, or None.""" - return self._props.get('namespace', None) - - @property - def type(self): - """Returns: the type of this schema.""" - return self._type - - @property - def doc(self): - """Returns: the documentation associated to this schema, if any, or None.""" - return self._props.get('doc', None) - - @property - def props(self): - """Reports all the properties of this schema. - - Includes all properties, reserved and non reserved. - JSON properties of this schema are directly generated from this dict. - - Returns: - A dictionary of properties associated to this schema. - """ - return self._props - - @property - def other_props(self): - """Returns: the dictionary of non-reserved properties.""" - return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) - - def __str__(self): - """Returns: the JSON representation of this schema.""" - return json.dumps(self.to_json(names=None)) - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
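# The reserved property names above (SCHEMA_RESERVED_PROPS / FIELD_RESERVED_PROPS) are
# what a JSON schema descriptor is built from. A typical record descriptor, of the kind
# schema.parse() consumes for META_SCHEMA in datafile.py, might look like the following;
# the record and field names here are made up for illustration.
import json

user_schema_json = json.dumps({
    "type": "record",
    "name": "User",
    "namespace": "com.example",
    "fields": [
        {"name": "id", "type": "long"},
        {"name": "email", "type": ["null", "string"], "default": None},
    ],
})
# Parsing this descriptor would yield a RecordSchema whose 'fields' are Field objects,
# with 'email' represented as a union of the null and string primitive schemas.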
- """ - raise Exception('Cannot run abstract method.') - - -# ------------------------------------------------------------------------------ - - -_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') - -_RE_FULL_NAME = re.compile( - r'^' - r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace - r'([A-Za-z_][A-Za-z0-9_]*)' # name - r'$' -) - - -class Name(object): - """Representation of an Avro name.""" - - def __init__(self, name, namespace=None): - """Parses an Avro name. - - Args: - name: Avro name to parse (relative or absolute). - namespace: Optional explicit namespace if the name is relative. - """ - # Normalize: namespace is always defined as a string, possibly empty. - if namespace is None: - namespace = '' - - if '.' in name: - # name is absolute, namespace is ignored: - self._fullname = name - - match = _RE_FULL_NAME.match(self._fullname) - if match is None: - raise SchemaParseException( - 'Invalid absolute schema name: %r.' % self._fullname) - - self._name = match.group(1) - self._namespace = self._fullname[:-(len(self._name) + 1)] - - else: - # name is relative, combine with explicit namespace: - self._name = name - self._namespace = namespace - self._fullname = (self._name - if (not self._namespace) else - '%s.%s' % (self._namespace, self._name)) - - # Validate the fullname: - if _RE_FULL_NAME.match(self._fullname) is None: - raise SchemaParseException( - 'Invalid schema name %r infered from name %r and namespace %r.' - % (self._fullname, self._name, self._namespace)) - - def __eq__(self, other): - if not isinstance(other, Name): - return NotImplemented - return self.fullname == other.fullname - - @property - def simple_name(self): - """Returns: the simple name part of this name.""" - return self._name - - @property - def namespace(self): - """Returns: this name's namespace, possible the empty string.""" - return self._namespace - - @property - def fullname(self): - """Returns: the full name.""" - return self._fullname - - -# ------------------------------------------------------------------------------ - - -class Names(object): - """Tracks Avro named schemas and default namespace during parsing.""" - - def __init__(self, default_namespace=None, names=None): - """Initializes a new name tracker. - - Args: - default_namespace: Optional default namespace. - names: Optional initial mapping of known named schemas. - """ - if names is None: - names = {} - self._names = names - self._default_namespace = default_namespace - - @property - def names(self): - """Returns: the mapping of known named schemas.""" - return self._names - - @property - def default_namespace(self): - """Returns: the default namespace, if any, or None.""" - return self._default_namespace - - def new_with_default_namespace(self, namespace): - """Creates a new name tracker from this tracker, but with a new default ns. - - Args: - namespace: New default namespace to use. - Returns: - New name tracker with the specified default namespace. - """ - return Names(names=self._names, default_namespace=namespace) - - def get_name(self, name, namespace=None): - """Resolves the Avro name according to this name tracker's state. - - Args: - name: Name to resolve (absolute or relative). - namespace: Optional explicit namespace. - Returns: - The specified name, resolved according to this tracker. - """ - if namespace is None: - namespace = self._default_namespace - return Name(name=name, namespace=namespace) - - def get_schema(self, name, namespace=None): - """Resolves an Avro schema by name. 
- - Args: - name: Name (relative or absolute) of the Avro schema to look up. - namespace: Optional explicit namespace. - Returns: - The schema with the specified name, if any, or None. - """ - avro_name = self.get_name(name=name, namespace=namespace) - return self._names.get(avro_name.fullname, None) - - def prune_namespace(self, properties): - """given a properties, return properties with namespace removed if - it matches the own default namespace - """ - if self.default_namespace is None: - # I have no default -- no change - return properties - if 'namespace' not in properties: - # he has no namespace - no change - return properties - if properties['namespace'] != self.default_namespace: - # we're different - leave his stuff alone - return properties - # we each have a namespace and it's redundant. delete his. - prunable = properties.copy() - del prunable['namespace'] - return prunable - - def register(self, schema): - """Registers a new named schema in this tracker. - - Args: - schema: Named Avro schema to register in this tracker. - """ - if schema.fullname in VALID_TYPES: - raise SchemaParseException( - '%s is a reserved type name.' % schema.fullname) - if schema.fullname in self.names: - raise SchemaParseException( - 'Avro name %r already exists.' % schema.fullname) - - logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname) - self._names[schema.fullname] = schema - - -# ------------------------------------------------------------------------------ - - -class NamedSchema(Schema): - """Abstract base class for named schemas. - - Named schemas are enumerated in NAMED_TYPES. - """ - - def __init__( - self, - data_type, - name=None, - namespace=None, - names=None, - other_props=None, - ): - """Initializes a new named schema object. - - Args: - data_type: Type of the named schema. - name: Name (absolute or relative) of the schema. - namespace: Optional explicit namespace if name is relative. - names: Tracker to resolve and register Avro names. - other_props: Optional map of additional properties of the schema. - """ - assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type) - self._avro_name = names.get_name(name=name, namespace=namespace) - - super(NamedSchema, self).__init__(data_type, other_props) - - names.register(self) - - self._props['name'] = self.name - if self.namespace: - self._props['namespace'] = self.namespace - - @property - def avro_name(self): - """Returns: the Name object describing this schema's name.""" - return self._avro_name - - @property - def name(self): - return self._avro_name.simple_name - - @property - def namespace(self): - return self._avro_name.namespace - - @property - def fullname(self): - return self._avro_name.fullname - - def name_ref(self, names): - """Reports this schema name relative to the specified name tracker. - - Args: - names: Avro name tracker to relativise this schema name against. - Returns: - This schema name, relativised against the specified name tracker. - """ - if self.namespace == names.default_namespace: - return self.name - return self.fullname - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - -# ------------------------------------------------------------------------------ - - -_NO_DEFAULT = object() - - -class Field(object): - """Representation of the schema of a field in a record.""" - - def __init__( - self, - data_type, - name, - index, - has_default, - default=_NO_DEFAULT, - order=None, - doc=None, - other_props=None - ): - """Initializes a new Field object. - - Args: - data_type: Avro schema of the field. - name: Name of the field. - index: 0-based position of the field. - has_default: - default: - order: - doc: - other_props: - """ - if (not isinstance(name, _str)) or (not name): - raise SchemaParseException('Invalid record field name: %r.' % name) - if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): - raise SchemaParseException('Invalid record field order: %r.' % order) - - # All properties of this record field: - self._props = {} - - self._has_default = has_default - if other_props: - self._props.update(other_props) - - self._index = index - self._type = self._props['type'] = data_type - self._name = self._props['name'] = name - - if has_default: - self._props['default'] = default - - if order is not None: - self._props['order'] = order - - if doc is not None: - self._props['doc'] = doc - - @property - def type(self): - """Returns: the schema of this field.""" - return self._type - - @property - def name(self): - """Returns: this field name.""" - return self._name - - @property - def index(self): - """Returns: the 0-based index of this field in the record.""" - return self._index - - @property - def default(self): - return self._props['default'] - - @property - def has_default(self): - return self._has_default - - @property - def order(self): - return self._props.get('order', None) - - @property - def doc(self): - return self._props.get('doc', None) - - @property - def props(self): - return self._props - - @property - def other_props(self): - return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) - - def __str__(self): - return json.dumps(self.to_json()) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['type'] = self.type.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Primitive Types - - -class PrimitiveSchema(Schema): - """Schema of a primitive Avro type. - - Valid primitive types are defined in PRIMITIVE_TYPES. - """ - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object for the specified primitive type. - - Args: - data_type: Type of the schema to construct. Must be primitive. - """ - if data_type not in PRIMITIVE_TYPES: - raise AvroException('%r is not a valid primitive type.' % data_type) - super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) - - @property - def name(self): - """Returns: the simple name of this schema.""" - # The name of a primitive type is the type itself. - return self.type - - @property - def fullname(self): - """Returns: the fully qualified name of this schema.""" - # The full name is the simple name for primitive schema. 
- return self.name - - def to_json(self, names=None): - if len(self.props) == 1: - return self.fullname - return self.props - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (non-recursive) - - -class FixedSchema(NamedSchema): - def __init__( - self, - name, - namespace, - size, - names=None, - other_props=None, - ): - # Ensure valid ctor args - if not isinstance(size, int): - fail_msg = 'Fixed Schema requires a valid integer for size property.' - raise AvroException(fail_msg) - - super(FixedSchema, self).__init__( - data_type=FIXED, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - self._props['size'] = size - - @property - def size(self): - """Returns: the size of this fixed schema, in bytes.""" - return self._props['size'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ - - -class EnumSchema(NamedSchema): - def __init__( - self, - name, - namespace, - symbols, - names=None, - doc=None, - other_props=None, - ): - """Initializes a new enumeration schema object. - - Args: - name: Simple name of this enumeration. - namespace: Optional namespace. - symbols: Ordered list of symbols defined in this enumeration. - names: - doc: - other_props: - """ - symbols = tuple(symbols) - symbol_set = frozenset(symbols) - if (len(symbol_set) != len(symbols) - or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): - raise AvroException( - 'Invalid symbols for enum schema: %r.' % (symbols,)) - - super(EnumSchema, self).__init__( - data_type=ENUM, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - - self._props['symbols'] = symbols - if doc is not None: - self._props['doc'] = doc - - @property - def symbols(self): - """Returns: the symbols defined in this enum.""" - return self._props['symbols'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (recursive) - - -class ArraySchema(Schema): - """Schema of an array.""" - - def __init__(self, items, other_props=None): - """Initializes a new array schema object. - - Args: - items: Avro schema of the array items. 
- other_props: - """ - super(ArraySchema, self).__init__( - data_type=ARRAY, - other_props=other_props, - ) - self._items_schema = items - self._props['items'] = items - - @property - def items(self): - """Returns: the schema of the items in this array.""" - return self._items_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - item_schema = self.items - to_dump['items'] = item_schema.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class MapSchema(Schema): - """Schema of a map.""" - - def __init__(self, values, other_props=None): - """Initializes a new map schema object. - - Args: - values: Avro schema of the map values. - other_props: - """ - super(MapSchema, self).__init__( - data_type=MAP, - other_props=other_props, - ) - self._values_schema = values - self._props['values'] = values - - @property - def values(self): - """Returns: the schema of the values in this map.""" - return self._values_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['values'] = self.values.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class UnionSchema(Schema): - """Schema of a union.""" - - def __init__(self, schemas): - """Initializes a new union schema object. - - Args: - schemas: Ordered collection of schema branches in the union. - """ - super(UnionSchema, self).__init__(data_type=UNION) - self._schemas = tuple(schemas) - - # Validate the schema branches: - - # All named schema names are unique: - named_branches = tuple( - filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) - unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) - if len(unique_names) != len(named_branches): - raise AvroException( - 'Invalid union branches with duplicate schema name:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - # Types are unique within unnamed schemas, and union is not allowed: - unnamed_branches = tuple( - filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) - unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) - if UNION in unique_types: - raise AvroException( - 'Invalid union branches contain other unions:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - if len(unique_types) != len(unnamed_branches): - raise AvroException( - 'Invalid union branches with duplicate type:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - @property - def schemas(self): - """Returns: the ordered list of schema branches in the union.""" - return self._schemas - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - to_dump.append(schema.to_json(names)) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class ErrorUnionSchema(UnionSchema): - """Schema representing the declared errors of a protocol message.""" - - def __init__(self, schemas): - """Initializes an error-union 
schema. - - Args: - schema: collection of error schema. - """ - # Prepend "string" to handle system errors - schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas) - super(ErrorUnionSchema, self).__init__(schemas=schemas) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - # Don't print the system error schema - if schema.type == STRING: - continue - to_dump.append(schema.to_json(names)) - return to_dump - - -# ------------------------------------------------------------------------------ - - -class RecordSchema(NamedSchema): - """Schema of a record.""" - - @staticmethod - def _make_field(index, field_desc, names): - """Builds field schemas from a list of field JSON descriptors. - - Args: - index: 0-based index of the field in the record. - field_desc: JSON descriptors of a record field. - Return: - The field schema. - """ - field_schema = schema_from_json_data( - json_data=field_desc['type'], - names=names, - ) - other_props = ( - dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS))) - return Field( - data_type=field_schema, - name=field_desc['name'], - index=index, - has_default=('default' in field_desc), - default=field_desc.get('default', _NO_DEFAULT), - order=field_desc.get('order', None), - doc=field_desc.get('doc', None), - other_props=other_props, - ) - - @staticmethod - def make_field_list(field_desc_list, names): - """Builds field schemas from a list of field JSON descriptors. - - Guarantees field name unicity. - - Args: - field_desc_list: collection of field JSON descriptors. - names: Avro schema tracker. - Yields - Field schemas. - """ - for index, field_desc in enumerate(field_desc_list): - yield RecordSchema._make_field(index, field_desc, names) - - @staticmethod - def _make_field_map(fields): - """Builds the field map. - - Guarantees field name unicity. - - Args: - fields: iterable of field schema. - Returns: - A map of field schemas, indexed by name. - """ - field_map = {} - for field in fields: - if field.name in field_map: - raise SchemaParseException( - 'Duplicate record field name %r.' % field.name) - field_map[field.name] = field - return field_map - - def __init__( - self, - name, - namespace, - fields=None, - make_fields=None, - names=None, - record_type=RECORD, - doc=None, - other_props=None - ): - """Initializes a new record schema object. - - Args: - name: Name of the record (absolute or relative). - namespace: Optional namespace the record belongs to, if name is relative. - fields: collection of fields to add to this record. - Exactly one of fields or make_fields must be specified. - make_fields: function creating the fields that belong to the record. - The function signature is: make_fields(names) -> ordered field list. - Exactly one of fields or make_fields must be specified. - names: - record_type: Type of the record: one of RECORD, ERROR or REQUEST. - Protocol requests are not named. - doc: - other_props: - """ - if record_type == REQUEST: - # Protocol requests are not named: - super(RecordSchema, self).__init__( - data_type=REQUEST, - other_props=other_props, - ) - elif record_type in [RECORD, ERROR]: - # Register this record name in the tracker: - super(RecordSchema, self).__init__( - data_type=record_type, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - else: - raise SchemaParseException( - 'Invalid record type: %r.' 
% record_type) - - if record_type in [RECORD, ERROR]: - avro_name = names.get_name(name=name, namespace=namespace) - nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) - elif record_type == REQUEST: - # Protocol request has no name: no need to change default namespace: - nested_names = names - - if fields is None: - fields = make_fields(names=nested_names) - else: - assert make_fields is None - self._fields = tuple(fields) - - self._field_map = RecordSchema._make_field_map(self._fields) - - self._props['fields'] = fields - if doc is not None: - self._props['doc'] = doc - - @property - def fields(self): - """Returns: the field schemas, as an ordered tuple.""" - return self._fields - - @property - def field_map(self): - """Returns: a read-only map of the field schemas index by field names.""" - return self._field_map - - def to_json(self, names=None): - if names is None: - names = Names() - # Request records don't have names - if self.type == REQUEST: - return [f.to_json(names) for f in self.fields] - - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - - to_dump = names.prune_namespace(self.props.copy()) - to_dump['fields'] = [f.to_json(names) for f in self.fields] - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Module functions - - -def filter_keys_out(items, keys): - """Filters a collection of (key, value) items. - - Exclude any item whose key belongs to keys. - - Args: - items: Dictionary of items to filter the keys out of. - keys: Keys to filter out. - Yields: - Filtered items. - """ - for key, value in items.items(): - if key in keys: - continue - yield key, value - - -# ------------------------------------------------------------------------------ - - -def _schema_from_json_string(json_string, names): - if json_string in PRIMITIVE_TYPES: - return PrimitiveSchema(data_type=json_string) - - # Look for a known named schema: - schema = names.get_schema(name=json_string) - if schema is None: - raise SchemaParseException( - 'Unknown named schema %r, known names: %r.' 
- % (json_string, sorted(names.names))) - return schema - - -def _schema_from_json_array(json_array, names): - def MakeSchema(desc): - return schema_from_json_data(json_data=desc, names=names) - - return UnionSchema(map(MakeSchema, json_array)) - - -def _schema_from_json_object(json_object, names): - data_type = json_object.get('type') - if data_type is None: - raise SchemaParseException( - 'Avro schema JSON descriptor has no "type" property: %r' % json_object) - - other_props = dict( - filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) - - if data_type in PRIMITIVE_TYPES: - # FIXME should not ignore other properties - result = PrimitiveSchema(data_type, other_props=other_props) - - elif data_type in NAMED_TYPES: - name = json_object.get('name') - namespace = json_object.get('namespace', names.default_namespace) - if data_type == FIXED: - size = json_object.get('size') - result = FixedSchema(name, namespace, size, names, other_props) - elif data_type == ENUM: - symbols = json_object.get('symbols') - doc = json_object.get('doc') - result = EnumSchema(name, namespace, symbols, names, doc, other_props) - - elif data_type in [RECORD, ERROR]: - field_desc_list = json_object.get('fields', ()) - - def MakeFields(names): - return tuple(RecordSchema.make_field_list(field_desc_list, names)) - - result = RecordSchema( - name=name, - namespace=namespace, - make_fields=MakeFields, - names=names, - record_type=data_type, - doc=json_object.get('doc'), - other_props=other_props, - ) - else: - raise Exception('Internal error: unknown type %r.' % data_type) - - elif data_type in VALID_TYPES: - # Unnamed, non-primitive Avro type: - - if data_type == ARRAY: - items_desc = json_object.get('items') - if items_desc is None: - raise SchemaParseException( - 'Invalid array schema descriptor with no "items" : %r.' - % json_object) - result = ArraySchema( - items=schema_from_json_data(items_desc, names), - other_props=other_props, - ) - - elif data_type == MAP: - values_desc = json_object.get('values') - if values_desc is None: - raise SchemaParseException( - 'Invalid map schema descriptor with no "values" : %r.' - % json_object) - result = MapSchema( - values=schema_from_json_data(values_desc, names=names), - other_props=other_props, - ) - - elif data_type == ERROR_UNION: - error_desc_list = json_object.get('declared_errors') - assert error_desc_list is not None - error_schemas = map( - lambda desc: schema_from_json_data(desc, names=names), - error_desc_list) - result = ErrorUnionSchema(schemas=error_schemas) - - else: - raise Exception('Internal error: unknown type %r.' % data_type) - else: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r' % json_object) - return result - - -# Parsers for the JSON data types: -_JSONDataParserTypeMap = { - _str: _schema_from_json_string, - list: _schema_from_json_array, - dict: _schema_from_json_object, -} - - -def schema_from_json_data(json_data, names=None): - """Builds an Avro Schema from its JSON descriptor. - - Args: - json_data: JSON data representing the descriptor of the Avro schema. - names: Optional tracker for Avro named schemas. - Returns: - The Avro schema parsed from the JSON descriptor. - Raises: - SchemaParseException: if the descriptor is invalid. - """ - if names is None: - names = Names() - - # Select the appropriate parser based on the JSON data type: - parser = _JSONDataParserTypeMap.get(type(json_data)) - if parser is None: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) - return parser(json_data, names=names) - - -# ------------------------------------------------------------------------------ - - -def parse(json_string): - """Constructs a Schema from its JSON descriptor in text form. - - Args: - json_string: String representation of the JSON descriptor of the schema. - Returns: - The parsed schema. - Raises: - SchemaParseException: on JSON parsing error, - or if the JSON descriptor is invalid. - """ - try: - json_data = json.loads(json_string) - except Exception as exn: - raise SchemaParseException( - 'Error parsing schema from JSON: %r. ' - 'Error message: %r.' - % (json_string, exn)) - - # Initialize the names object - names = Names() - - # construct the Avro Schema object - return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client.py deleted file mode 100644 index 361931a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client.py +++ /dev/null @@ -1,443 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, - Optional, - Any, - Iterable, - Dict, - List, - Type, - Tuple, - TYPE_CHECKING, -) -import logging - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/?comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except StorageErrorException as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} - except KeyError: - credential = conn_settings.get("SharedAccessSignature") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DefaultEndpointsProtocol"], - conn_settings["AccountName"], - service, - conn_settings["EndpointSuffix"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["AccountName"], service, 
conn_settings["EndpointSuffix"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client_async.py deleted file mode 100644 index 1fec883..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/base_client_async.py +++ /dev/null @@ -1,185 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/?comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('sas', None), - kwargs.pop('timeout', None) - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except StorageErrorException as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/constants.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/constants.py deleted file mode 100644 index f67ea29..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys -from .._generated.version import VERSION - - -X_MS_VERSION = VERSION - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds - # the 80000 seconds was calculated with: - # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 80000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/encryption.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. 
- ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/models.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/models.py deleted file mode 100644 index c51356b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/models.py +++ /dev/null @@ -1,466 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.blob.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - - return parsed - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. 
-
-    The fields are saved as simple strings since the user does not have to interact with this object;
-    to generate an identity SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
-    """
-    def __init__(self):
-        self.signed_oid = None
-        self.signed_tid = None
-        self.signed_start = None
-        self.signed_expiry = None
-        self.signed_service = None
-        self.signed_version = None
-        self.value = None
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/parser.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/parser.py
deleted file mode 100644
index c6feba8..0000000
--- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/parser.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# -------------------------------------------------------------------------
-
-import sys
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):  # pylint: disable=undefined-variable
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies.py
deleted file mode 100644
index c9bc798..0000000
--- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies.py
+++ /dev/null
@@ -1,610 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import re
-import random
-from time import time
-from io import SEEK_SET, UnsupportedOperation
-import logging
-import uuid
-import types
-from typing import Any, TYPE_CHECKING
-from wsgiref.handlers import format_date_time
-try:
-    from urllib.parse import (
-        urlparse,
-        parse_qsl,
-        urlunparse,
-        urlencode,
-    )
-except ImportError:
-    from urllib import urlencode  # type: ignore
-    from urlparse import (  # type: ignore
-        urlparse,
-        parse_qsl,
-        urlunparse,
-    )
-
-from azure.core.pipeline.policies import (
-    HeadersPolicy,
-    SansIOHTTPPolicy,
-    NetworkTraceLoggingPolicy,
-    HTTPPolicy,
-    RequestHistory
-)
-from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
-
-from .models import LocationMode
-
-try:
-    _unicode_type = unicode  # type: ignore
-except NameError:
-    _unicode_type = str
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def is_exhausted(settings):
-    """Are we out of retries?"""
-    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
-    retry_counts = list(filter(None, retry_counts))
-    if not retry_counts:
-        return False
-    return min(retry_counts) < 0
-
-
-def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
-
-
-def is_retry(response, mode):
-    """Is this method/status code retryable? (Based on whitelists and control
-    variables such as the number of total retries to allow, whether to
-    respect the Retry-After header, whether this header is present, and
-    whether the returned status code is on the list of status codes to
-    be retried upon on the presence of the aforementioned header)
-    """
-    status = response.http_response.status_code
-    if 300 <= status < 500:
-        # An exception occurred, but in most cases it was expected. Examples could
-        # include a 409 Conflict or 412 Precondition Failed.
-        if status == 404 and mode == LocationMode.SECONDARY:
-            # Response code 404 should be retried if secondary was used.
-            return True
-        if status == 408:
-            # Response code 408 is a timeout and should be retried.
-            return True
-        return False
-    if status >= 500:
-        # Response codes above 500 with the exception of 501 Not Implemented and
-        # 505 Version Not Supported indicate a server issue and should be retried.
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/request_handlers.py deleted file mode 100644 index 4f15b65..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/request_handlers.py +++ /dev/null @@ -1,147 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/response_handlers.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/response_handlers.py deleted file mode 100644 index ac526e5..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/response_handlers.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.location_mode, deserialized - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message 
+= "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, 
- QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads.py deleted file mode 100644 index abf3fb2..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads.py +++ /dev/null @@ -1,550 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
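# A standalone sketch of add_account_signature above: the account SAS string-to-sign is the
# account name followed by the signed fields, one per line, each line newline-terminated,
# in the order sp, ss, srt, st, se, sip, spr, sv. sign_string is assumed here to be a
# base64-encoded HMAC-SHA256 over the base64-decoded account key; all values are placeholders.
import base64
import hashlib
import hmac

def sign_string(account_key, string_to_sign):
    key = base64.b64decode(account_key.encode('utf-8'))
    digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')

def account_sas_string_to_sign(account_name, fields):
    # same field order as the add_account_signature implementation above
    ordered = ('sp', 'ss', 'srt', 'st', 'se', 'sip', 'spr', 'sv')
    return account_name + '\n' + ''.join((fields.get(q) or '') + '\n' for q in ordered)

string_to_sign = account_sas_string_to_sign(
    'myaccount',                      # placeholder account name
    {'sp': 'rl', 'ss': 'b', 'srt': 'sco',
     'st': '2021-01-01T00:00:00Z', 'se': '2021-01-02T00:00:00Z',
     'spr': 'https', 'sv': '2020-02-10'},
)
signature = sign_string(base64.b64encode(b'placeholder-key').decode(), string_to_sign)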
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 
0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, block_id, 
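# A reduced sketch of the get_chunk_streams loop above: read a stream in fixed-size chunks,
# yielding (offset, data) pairs, stopping at EOF or once total_size bytes have been consumed.
# Encryption and padding are omitted; the helper name is illustrative only.
def iter_chunks(stream, chunk_size, total_size=None):
    index = 0
    while True:
        data = b""
        while True:
            read_size = chunk_size - len(data)
            if total_size is not None:
                read_size = min(read_size, total_size - (index + len(data)))
            temp = stream.read(read_size)
            data += temp or b""
            if temp == b"" or len(data) == chunk_size:
                break
        if not data:
            break
        yield index, data
        if len(data) < chunk_size:
            break
        index += len(data)

# e.g. with an in-memory stream:
#   from io import BytesIO
#   list(iter_chunks(BytesIO(b"x" * 10), 4)) == [(0, b"xxxx"), (4, b"xxxx"), (8, b"xx")]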
block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, block_id, block_stream): - try: - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = 
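# Two small helpers distilled from the uploaders above, with example values: block-blob
# chunks are staged under a doubly base64-encoded block id derived from the zero-padded
# chunk offset (as done above), and page-blob chunks that are entirely zero bytes are
# skipped. This assumes the package's encode_base64/url_quote helpers behave like
# base64.b64encode and urllib.parse.quote.
import base64
from urllib.parse import quote

def block_id_for_offset(chunk_offset):
    index = '{0:032d}'.format(chunk_offset)
    inner = base64.b64encode(index.encode('utf-8')).decode('utf-8')
    return base64.b64encode(quote(inner).encode('utf-8')).decode('utf-8')

def is_chunk_empty(chunk_data):
    # True only if every byte is zero, i.e. an unwritten page range
    return not any(bytearray(chunk_data))

assert is_chunk_empty(b'\x00' * 512)
assert not is_chunk_empty(b'\x00' * 511 + b'\x01')
block_id_for_offset(0)  # block id for the chunk starting at offset 0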
len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. 
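# The essence of the SubStream.read path above, as a sketch: parallel uploader threads share
# one wrapped stream, so each sub-range read must seek and read under a common lock to keep
# the two operations atomic. The function name and arguments are illustrative.
import threading

def read_substream_window(wrapped_stream, begin_index, position, size, lock=None):
    absolute_position = begin_index + position
    if lock is not None:
        with lock:
            wrapped_stream.seek(absolute_position)
            if wrapped_stream.tell() != absolute_position:
                raise IOError("Stream failed to seek to the desired location.")
            return wrapped_stream.read(size)
    wrapped_stream.seek(absolute_position)
    return wrapped_stream.read(size)

# e.g. two blocks sharing one file object:
#   lock = threading.Lock()
#   first = read_substream_window(f, 0, 0, 4 * 1024 * 1024, lock)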
- if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads_async.py deleted file mode 100644 index fe68a2b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared/uploads_async.py +++ /dev/null @@ -1,350 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
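# A compact version of the IterStreamer.read logic above: satisfy a read(size) call from a
# bytes generator, carrying any surplus bytes over to the next call. The sample generator
# and values are stand-ins.
def read_from_generator(iterator, size, leftover=b""):
    data, count = leftover, len(leftover)
    try:
        while count < size:
            chunk = next(iterator)
            data += chunk
            count += len(chunk)
    except StopIteration:
        pass
    return data[:size], data[size:]  # (bytes to return, new leftover)

gen = iter([b"abc", b"defg", b"hi"])
first, rest = read_from_generator(gen, 5)         # b"abcde", leftover b"fg"
second, rest = read_from_generator(gen, 5, rest)  # b"fghi", leftover b""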
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, block_id, block_stream): - range_id = await self._upload_substream_block(block_id, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, block_id, block_stream): - try: - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - 
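# A self-contained sketch of the asyncio fan-out used by _parallel_uploads above: keep at
# most max_concurrency uploads in flight and start a new one each time one finishes. The
# worker coroutine here is a stand-in for process_chunk/process_substream_block.
import asyncio
from itertools import islice

async def bounded_parallel(worker, items, max_concurrency):
    pending = iter(items)
    running = {
        asyncio.ensure_future(worker(item))
        for item in islice(pending, max_concurrency)
    }
    results = []
    while running:
        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
        results.extend(task.result() for task in done)
        for _ in range(len(done)):
            try:
                running.add(asyncio.ensure_future(worker(next(pending))))
            except StopIteration:
                break
    return results

async def fake_upload(chunk_id):
    await asyncio.sleep(0)  # placeholder for a service call
    return chunk_id

# asyncio.run(bounded_parallel(fake_upload, range(10), max_concurrency=4))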
chunk_end = chunk_offset + len(chunk_data) - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - chunk_end, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_02_10/_shared_access_signature.py deleted file mode 100644 index 370fe4e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_shared_access_signature.py +++ /dev/null @@ -1,596 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from ._shared import sign_string, url_quote -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ - QueryStringConstants - -if TYPE_CHECKING: - from datetime import datetime - from .import ( - ResourceTypes, - AccountSasPermissions, - UserDelegationKey, - ContainerSasPermissions, - BlobSasPermissions - ) - - -class BlobQueryStringConstants(object): - SIGNED_TIMESTAMP = 'snapshot' - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key=None, user_delegation_key=None): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: - Instead of an account key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling get_user_delegation_key on any Blob service object. - ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - self.user_delegation_key = user_delegation_key - - def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, - expiry=None, start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the blob or one of its snapshots. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to grant permission. - :param BlobSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. 
- Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = container_name + '/' + blob_name - - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - - resource = 'bs' if snapshot else 'b' - resource = 'bv' if version_id else resource - resource = 'd' if kwargs.pop("is_directory", None) else resource - sas.add_resource(resource) - - sas.add_timestamp(snapshot or version_id) - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, resource_path, - user_delegation_key=self.user_delegation_key) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the container. 
- Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, container_name, - user_delegation_key=self.user_delegation_key) - return sas.get_token() - - -class _BlobSharedAccessHelper(_SharedAccessHelper): - - def add_timestamp(self, timestamp): - self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) - - def add_info_for_hns_account(self, **kwargs): - self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) - self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) - - def get_value_to_append(self, query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): - # pylint: disable = no-member - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/blob/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. - string_to_sign = \ - (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource) - - if user_delegation_key is not None: - self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) - self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) - self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) - self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) - self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) - self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_TID) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) - else: - string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + - self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + - self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + - 
self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key if user_delegation_key is None else user_delegation_key.value, - string_to_sign)) - - def get_token(self): - # a conscious decision was made to exclude the timestamp in the generated token - # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp - exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] - return '&'.join(['{0}={1}'.format(n, url_quote(v)) - for n, v in self.query_dict.items() if v is not None and n not in exclude]) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the blob service. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.blob.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. 
- For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_sas_token] - :end-before: [END create_sas_token] - :language: python - :dedent: 8 - :caption: Generating a shared access signature. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(blob=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_container_sas( - account_name, # type: str - container_name, # type: str - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[ContainerSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a container. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. 
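# A hypothetical call to generate_account_sas above. It assumes one of the blob API versions
# retained by this release (for example v2019_07_07) re-exports the same helper names as the
# deleted v2020_02_10 module shown here; account name, key and expiry are placeholders.
from datetime import datetime, timedelta
from azure.multiapi.storagev2.blob.v2019_07_07 import (
    generate_account_sas,
    ResourceTypes,
    AccountSasPermissions,
)

sas_token = generate_account_sas(
    account_name="myaccount",
    account_key="<base64 account key>",
    resource_types=ResourceTypes(service=True, container=True, object=True),
    permission=AccountSasPermissions(read=True, list=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
)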
- :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 12 - :caption: Generating a sas token. - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_container( - container_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_blob_sas( - account_name, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[BlobSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a blob. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str blob_name: - The name of the blob. - :param str snapshot: - An optional blob snapshot ID. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. 
- :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.BlobSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str version_id: - An optional blob version ID. This parameter is only for versioning enabled account - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - version_id = kwargs.pop('version_id', None) - if version_id and snapshot: - raise ValueError("snapshot and version_id cannot be set at the same time.") - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_blob( - container_name, - blob_name, - snapshot=snapshot, - version_id=version_id, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_02_10/_upload_helpers.py deleted file mode 100644 index bd59362..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_upload_helpers.py +++ /dev/null @@ -1,291 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceExistsError, ResourceModifiedError - -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from ._shared.models import StorageErrorCode -from ._shared.uploads import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from ._shared.encryption import generate_blob_encryption_data, encrypt_blob -from ._generated.models import ( - StorageErrorException, - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' 
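For reference, a minimal sketch of how the account- and blob-level SAS helpers removed above are typically called, assuming the retained ``v2019_07_07`` namespace re-exports the same ``generate_account_sas``/``generate_blob_sas`` functions and model classes; the account name, key, container and blob names below are placeholders.

.. code-block:: python

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.blob.v2019_07_07 import (
        generate_account_sas,
        generate_blob_sas,
        ResourceTypes,
        AccountSasPermissions,
        BlobSasPermissions,
    )

    # Account-level SAS: read-only, valid one hour, service/container/object scope.
    account_sas = generate_account_sas(
        account_name="myaccount",                  # placeholder account
        account_key="<account-key>",               # placeholder shared key
        resource_types=ResourceTypes(service=True, container=True, object=True),
        permission=AccountSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # Blob-level SAS signed with the same shared key.
    blob_sas = generate_blob_sas(
        account_name="myaccount",
        container_name="mycontainer",
        blob_name="report.csv",
        account_key="<account-key>",
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

Either token can then be appended to a resource URL or passed as the ``credential`` of a client, as the docstrings above describe.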
- - -def _convert_mod_error(error): - message = error.message.replace( - "The condition specified using HTTP conditional header(s) is not met.", - "The specified blob already exists.") - message = message.replace("ConditionNotMet", "BlobAlreadyExists") - overwrite_error = ResourceExistsError( - message=message, - response=error.response, - error=error) - overwrite_error.error_code = StorageErrorCode.blob_already_exists - raise overwrite_error - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - # Do single put if the size is smaller than or equal config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return client.upload( - data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs - ) - else: - block_ids = upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - 
block_lookup.latest = block_ids - return client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. " - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs) - - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # 
attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_version.py b/azure/multiapi/storagev2/blob/v2020_02_10/_version.py deleted file mode 100644 index 202620c..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.6.0b1" diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/__init__.py deleted file mode 100644 index 247f39e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/__init__.py +++ /dev/null @@ -1,137 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os - -from .._models import BlobType -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._blob_client_async import BlobClient -from ._container_client_async import ContainerClient -from ._blob_service_client_async import BlobServiceClient -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -async def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. 
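The upload helpers deleted above drive the public ``upload_blob`` API: ``overwrite=False`` is enforced by sending ``If-None-Match: *``, and payloads at or below ``max_single_put_size`` go out as a single PUT while larger streams are staged as blocks. A hedged sketch of how that surfaces to callers, assuming the retained ``v2019_07_07`` sync client accepts the same constructor tuning knobs; the connection string is a placeholder.

.. code-block:: python

    from azure.core.exceptions import ResourceExistsError
    from azure.multiapi.storagev2.blob.v2019_07_07 import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>",                  # placeholder connection string
        container_name="mycontainer",
        blob_name="data.bin",
        max_single_put_size=4 * 1024 * 1024,    # uploads <= 4MB go out as one PUT
        max_block_size=4 * 1024 * 1024,         # larger payloads are staged as 4MB blocks
    )

    try:
        # With overwrite=False the helper adds If-None-Match: *, so an existing
        # blob surfaces as ResourceExistsError instead of being replaced.
        blob.upload_blob(b"\x00" * 1024, overwrite=False)
    except ResourceExistsError:
        pass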
This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -async def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = await client.download_blob(**kwargs) - await stream.readinto(handle) - - -async def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - await _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - await _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_client_async.py deleted file mode 100644 index 3020da3..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_client_async.py +++ /dev/null @@ -1,2335 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.exceptions import ResourceNotFoundError - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import get_page_ranges_result, parse_tags -from .._serialize import get_modify_conditions, get_api_version, get_access_conditions -from .._generated import VERSION -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageErrorException, CpkInfo -from .._deserialize import deserialize_blob_properties -from .._blob_client import BlobClient as BlobClientBase -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from .._models import BlobType, BlobBlock, BlobProperties -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - - -class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. 
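As a quick reference for the module-level async helpers removed above, a minimal sketch assuming the retained ``v2019_07_07`` aio namespace exposes the same ``upload_blob_to_url``/``download_blob_from_url`` functions; the SAS URL is a placeholder.

.. code-block:: python

    import asyncio
    from azure.multiapi.storagev2.blob.v2019_07_07.aio import (
        upload_blob_to_url,
        download_blob_from_url,
    )

    SAS_URL = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas-token>"

    async def main():
        # Upload bytes as a block blob, overwriting any existing content.
        await upload_blob_to_url(SAS_URL, b"hello, world", overwrite=True)
        # Download the same blob to a local file, allowing overwrite.
        await download_blob_from_url(SAS_URL, "hello_copy.txt", overwrite=True)

    asyncio.run(main())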
- :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobClient, self).__init__( - account_url, - container_name=container_name, - blob_name=blob_name, - snapshot=snapshot, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_account_information(self, **kwargs): # type: ignore - # type: (Optional[int]) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). 
- :rtype: dict(str, str) - """ - try: - return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob( - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. - Required if the blob has an active lease. - :paramtype: ~azure.storage.blob.aio.BlobLeaseClient - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 16 - :caption: Upload a blob to the container. 
- """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return await upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return await upload_page_blob(**options) - return await upload_append_blob(**options) - - @distributed_trace_async - async def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 16 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - downloader = StorageStreamDownloader(**options) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_blob(self, delete_snapshots=False, **kwargs): - # type: (str, Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
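The async ``upload_blob``/``download_blob`` pair documented above is most often used as a simple round trip. A hedged sketch, assuming the retained ``v2019_07_07`` aio ``BlobClient`` keeps the same surface; the account URL and credential are placeholders.

.. code-block:: python

    import asyncio
    from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobClient

    async def round_trip():
        async with BlobClient(
            account_url="https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            blob_name="notes.txt",
            credential="<account-key-or-sas>",   # placeholder credential
        ) as blob:
            await blob.upload_blob(b"first draft", overwrite=True)
            downloader = await blob.download_blob()
            return await downloader.readall()

    print(asyncio.run(round_trip()))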
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 16 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - await self._client.blob.delete(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_blob(self, **kwargs): - # type: (Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 12 - :caption: Undeleting a blob. - """ - try: - await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :param str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :param int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - await self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def get_blob_properties(self, **kwargs): - # type: (Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
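A short sketch of the existence check and soft-delete flow removed above; it assumes a ``BlobClient`` from the retained aio namespace and that a delete retention policy is enabled on the account, otherwise ``undelete_blob`` has nothing to restore.

.. code-block:: python

    from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobClient

    async def recycle(blob: BlobClient):
        if await blob.exists():
            # Remove the blob together with any snapshots it may have.
            await blob.delete_blob(delete_snapshots="include")
        # Soft-deleted data can be restored while the retention window is open.
        await blob.undelete_blob()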
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 12 - :caption: Getting the properties for a blob. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - blob_props = await self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return await self._client.blob.set_http_headers(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.set_metadata(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
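Metadata, HTTP-header updates and property reads, as documented above, usually travel together. A hedged sketch against the retained aio client, assuming ``ContentSettings`` is re-exported from the versioned namespace; the blob handle is assumed to be an already constructed ``BlobClient``.

.. code-block:: python

    from azure.multiapi.storagev2.blob.v2019_07_07 import ContentSettings
    from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobClient

    async def describe(blob: BlobClient):
        # Each call replaces the blob's entire metadata set.
        await blob.set_blob_metadata({"department": "finance"})
        # Override the served content type and cache behaviour.
        await blob.set_http_headers(content_settings=ContentSettings(
            content_type="text/csv", cache_control="max-age=3600"))
        props = await blob.get_blob_properties()
        return props.metadata, props.content_settings.content_type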
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return await self._client.page_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.append_blob.create(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
- :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 12 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.create_snapshot(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, Any) -> Any - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. 
The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. 
- If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Copy a blob from a URL. 
- """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return await self._client.page_blob.copy_incremental(**options) - return await self._client.blob.start_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - await self._client.blob.abort_copy_from_url(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. 
- :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=standard_blob_tier, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return await self._client.block_blob.stage_block(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A valid Base64 string value that identifies the - block. Prior to encoding, the string must be less than or equal to 64 - bytes in size. For a given blob, the length of the value specified for - the block_id parameter must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None
- """
- options = self._stage_block_from_url_options(
- block_id,
- source_url=self._encode_source_url(source_url),
- source_offset=source_offset,
- source_length=source_length,
- source_content_md5=source_content_md5,
- **kwargs)
- try:
- return await self._client.block_blob.stage_block_from_url(**options)
- except StorageErrorException as error:
- process_storage_error(error)
-
- @distributed_trace_async
- async def get_block_list(self, block_list_type="committed", **kwargs):
- # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
- """The Get Block List operation retrieves the list of blocks that have
- been uploaded as part of a block blob.
-
- :param str block_list_type:
- Specifies whether to return the list of committed
- blocks, the list of uncommitted blocks, or both lists together.
- Possible values include: 'committed', 'uncommitted', 'all'
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
- :keyword str if_tags_match_condition:
- Specify a SQL where clause on blob tags to operate only on blob with a matching value.
- eg. "\"tagname\"='my tag'"
-
- .. versionadded:: 12.4.0
-
- :keyword int timeout:
- The timeout parameter is expressed in seconds.
- :returns: A tuple of two lists - committed and uncommitted blocks
- :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
- """
- access_conditions = get_access_conditions(kwargs.pop('lease', None))
- mod_conditions = get_modify_conditions(kwargs)
- try:
- blocks = await self._client.block_blob.get_block_list(
- list_type=block_list_type,
- snapshot=self.snapshot,
- timeout=kwargs.pop('timeout', None),
- lease_access_conditions=access_conditions,
- modified_access_conditions=mod_conditions,
- **kwargs)
- except StorageErrorException as error:
- process_storage_error(error)
- return self._get_block_list_result(blocks)
-
- @distributed_trace_async
- async def commit_block_list( # type: ignore
- self, block_list, # type: List[BlobBlock]
- content_settings=None, # type: Optional[ContentSettings]
- metadata=None, # type: Optional[Dict[str, str]]
- **kwargs
- ):
- # type: (...) -> Dict[str, Union[str, datetime]]
- """The Commit Block List operation writes a blob by specifying the list of
- block IDs that make up the blob.
-
- :param list block_list:
- List of Blockblobs.
- :param ~azure.storage.blob.ContentSettings content_settings:
- ContentSettings object used to set blob properties. Used to set content type, encoding,
- language, disposition, md5, and cache control.
- :param metadata:
- Name-value pairs associated with the blob as metadata.
- :type metadata: dict[str, str]
- :keyword tags:
- Name-value pairs associated with the blob as tag. Tags are case-sensitive.
- The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
- and tag values must be between 0 and 256 characters.
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
- space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
-
- .. versionadded:: 12.4.0
-
- :paramtype tags: dict(str, str)
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.block_blob.commit_block_list(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. 
This is only applicable to page blobs on
- premium storage accounts.
- :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
- :keyword str if_tags_match_condition:
- Specify a SQL where clause on blob tags to operate only on blob with a matching value.
- eg. "\"tagname\"='my tag'"
-
- .. versionadded:: 12.4.0
-
- :keyword int timeout:
- The timeout parameter is expressed in seconds. This method may make
- multiple calls to the Azure service and the timeout will apply to
- each call individually.
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
- :rtype: None
- """
- access_conditions = get_access_conditions(kwargs.pop('lease', None))
- mod_conditions = get_modify_conditions(kwargs)
- if premium_page_blob_tier is None:
- raise ValueError("A PremiumPageBlobTier must be specified")
- try:
- await self._client.blob.set_tier(
- tier=premium_page_blob_tier,
- timeout=kwargs.pop('timeout', None),
- lease_access_conditions=access_conditions,
- modified_access_conditions=mod_conditions,
- **kwargs)
- except StorageErrorException as error:
- process_storage_error(error)
-
- @distributed_trace_async
- async def set_blob_tags(self, tags=None, **kwargs):
- # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
- """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
- Each call to this operation replaces all existing tags attached to the blob. To remove all
- tags from the blob, call this operation with no tags set.
-
- .. versionadded:: 12.4.0
- This operation was introduced in API version '2019-12-12'.
-
- :param tags:
- Name-value pairs associated with the blob as tag. Tags are case-sensitive.
- The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
- and tag values must be between 0 and 256 characters.
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
- space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
- :type tags: dict(str, str)
- :keyword str version_id:
- The version id parameter is an opaque DateTime
- value that, when present, specifies the version of the blob to set tags on.
- :keyword bool validate_content:
- If true, calculates an MD5 hash of the tags content. The storage
- service checks the hash of the content that has arrived
- with the hash that was sent. This is primarily valuable for detecting
- bitflips on the wire if using http instead of https, as https (the default),
- will already validate. Note that this MD5 hash is not stored with the
- blob.
- :keyword str if_tags_match_condition:
- Specify a SQL where clause on blob tags to operate only on blob with a matching value.
- eg. "\"tagname\"='my tag'"
- :keyword int timeout:
- The timeout parameter is expressed in seconds.
- :returns: Blob-updated property dict (Etag and last modified)
- :rtype: Dict[str, Any]
- """
- options = self._set_blob_tags_options(tags=tags, **kwargs)
- try:
- return await self._client.blob.set_tags(**options)
- except StorageErrorException as error:
- process_storage_error(error)
-
- @distributed_trace_async
- async def get_blob_tags(self, **kwargs):
- # type: (**Any) -> Dict[str, str]
- """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot.
-
- ..
versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = await self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = await self._client.page_blob.get_page_ranges(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def set_sequence_number( # type: ignore - self, sequence_number_action, # type: Union[str, SequenceNumberAction] - sequence_number=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return await self._client.page_blob.update_sequence_number(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_blob(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. 
Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return await self._client.page_blob.resize(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. 
Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return await self._client.page_blob.upload_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. 
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def clear_page(self, offset, length, **kwargs): - # type: (int, int, Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
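A minimal sketch of the upload_pages_from_url operation documented above, assuming a source blob reachable via a SAS URL (all URLs, names, and credentials below are placeholders)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

    async def main():
        dest = BlobClient(
            "https://myaccount.blob.core.windows.net",   # placeholder account URL
            container_name="mycontainer",
            blob_name="copy.vhd",                        # existing destination page blob
            credential="<sas-token>",                    # placeholder credential
        )
        # Placeholder source URL; must be public or carry its own SAS.
        source_url = "https://otheraccount.blob.core.windows.net/src/disk.vhd?<sas>"
        async with dest:
            # Copy the first 4 MiB of the source into the same 512-aligned range of the destination.
            await dest.upload_pages_from_url(source_url, offset=0, length=4 * 1024 * 1024, source_offset=0)

    asyncio.run(main())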
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return await self._client.page_blob.clear_pages(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
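A minimal sketch of clear_page as documented above (placeholder endpoint, names, and credential; assumes an existing page blob)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

    async def main():
        blob = BlobClient(
            "https://myaccount.blob.core.windows.net",   # placeholder account URL
            container_name="mycontainer",
            blob_name="disk.vhd",
            credential="<sas-token>",                    # placeholder credential
        )
        async with blob:
            # Clear the first page; offset and length must both be multiples of 512.
            result = await blob.clear_page(offset=0, length=512)
            print(result["last_modified"])

    asyncio.run(main())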
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return await self._client.append_blob.append_block(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async() - async def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. 
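A minimal sketch of append_block as documented above (placeholders throughout; create_append_blob is used here only to make the snippet self-contained)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient

    async def main():
        blob = BlobClient(
            "https://myaccount.blob.core.windows.net",   # placeholder account URL
            container_name="logs",
            blob_name="app.log",
            credential="<sas-token>",                    # placeholder credential
        )
        async with blob:
            await blob.create_append_blob()              # ensure the append blob exists
            result = await blob.append_block(b"new log line\n")
            print(result)                                # etag, last modified, append offset, block count

    asyncio.run(main())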
If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return await self._client.append_blob.append_block_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async() - async def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return await self._client.append_blob.seal(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_service_client_async.py deleted file mode 100644 index 8642282..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_blob_service_client_async.py +++ /dev/null @@ -1,641 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged - -from .._shared.models import LocationMode -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._shared.parser import _to_utc_datetime -from .._shared.response_handlers import parse_to_internal_user_delegation_key -from .._generated import VERSION -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageErrorException, StorageServiceProperties, KeyInfo -from .._blob_service_client import BlobServiceClient as BlobServiceClientBase -from ._container_client_async import ContainerClient -from ._blob_client_async import BlobClient -from .._models import ContainerProperties -from .._deserialize import service_stats_deserialize, service_properties_deserialize -from .._serialize import get_api_version -from ._models import ContainerPropertiesPaged, FilteredBlobPaged - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.pipeline.transport import HttpTransport - from azure.core.pipeline.policies import HTTPPolicy - from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey - from ._lease_async import BlobLeaseClient - from .._models import ( - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. 
If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. 
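A minimal sketch of get_user_delegation_key as documented above. It assumes the separate azure-identity package for an OAuth token credential (required for this call); the account URL is a placeholder::

    import asyncio
    from datetime import datetime, timedelta
    from azure.identity.aio import DefaultAzureCredential
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient

    async def main():
        credential = DefaultAzureCredential()            # token credential; a SAS or account key will not work here
        service = BlobServiceClient("https://myaccount.blob.core.windows.net", credential=credential)
        async with service:
            now = datetime.utcnow()
            key = await service.get_user_delegation_key(now, now + timedelta(hours=1))
            print(key.signed_expiry)
        await credential.close()

    asyncio.run(main())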
- - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 12 - :caption: Getting account information for the blob service. - """ - try: - return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_stats(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 12 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 12 - :caption: Getting service properties for the blob service. 
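A minimal sketch of get_service_stats and get_service_properties as documented above (placeholder URL and credential; stats require read-access geo-redundant replication on the account)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient

    async def main():
        service = BlobServiceClient(
            "https://myaccount.blob.core.windows.net",   # placeholder account URL
            credential="<account-key-or-sas>",           # placeholder credential
        )
        async with service:
            stats = await service.get_service_stats()
            print(stats["geo_replication"]["status"])
            props = await service.get_service_properties()
            print(props["hour_metrics"], props["delete_retention_policy"])

    asyncio.run(main())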
- """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 12 - :caption: Setting service properties for the blob service. - """ - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> AsyncItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. 
- - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 16 - :caption: Listing the containers in the blob service. - """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace_async - async def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. 
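A minimal sketch of list_containers and find_blobs_by_tags as documented above (placeholder URL, credential, prefix, and tag expression)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient

    async def main():
        service = BlobServiceClient(
            "https://myaccount.blob.core.windows.net",   # placeholder account URL
            credential="<sas-token>",                    # placeholder credential
        )
        async with service:
            # Lazily page through containers whose names start with "app".
            async for container in service.list_containers(name_starts_with="app", include_metadata=True):
                print(container.name, container.metadata)
            # Find blobs across containers whose tags match the (placeholder) expression.
            async for blob in service.find_blobs_by_tags("\"project\"='demo'"):
                print(blob.name)

    asyncio.run(main())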
This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 16 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - timeout = kwargs.pop('timeout', None) - kwargs.setdefault('merge_span', True) - await container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace_async - async def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 16 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword str new_name: - The new name for the deleted container to be restored to. - If not specified deleted_container_name will be used as the restored container name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - container = self.get_container_client(new_name or deleted_container_name) - try: - await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except StorageErrorException as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 12 - :caption: Getting the container client to interact with a specific container. 
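A minimal sketch of undelete_container as documented above. It assumes the account has a delete retention policy and that ContainerProperties exposes deleted/version fields as in azure-storage-blob; the URL and credential are placeholders::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient

    async def main():
        service = BlobServiceClient(
            "https://myaccount.blob.core.windows.net",   # placeholder account URL
            credential="<account-key>",                  # placeholder credential
        )
        async with service:
            # Restore the first soft-deleted container found, under its original name.
            async for container in service.list_containers(include_deleted=True):
                if container.deleted:
                    await service.undelete_container(container.name, container.version)
                    break

    asyncio.run(main())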
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by - :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 16 - :caption: Getting the blob client to interact with a specific blob. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_container_client_async.py deleted file mode 100644 index 730a1fd..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_container_client_async.py +++ /dev/null @@ -1,1121 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, - TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.pipeline.transport import AsyncHttpResponse - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from .._generated import VERSION -from .._generated.aio import AzureBlobStorage -from .._generated.models import ( - StorageErrorException, - SignedIdentifier) -from .._deserialize import deserialize_container_properties -from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name -from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import -from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix -from ._lease_async import BlobLeaseClient -from ._blob_client_async import BlobClient - -if TYPE_CHECKING: - from .._models import PublicAccess - from ._download_async import StorageStreamDownloader - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - AccessPolicy, - StandardBlobTier, - PremiumPageBlobTier) - - -class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. 
- :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 12 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(ContainerClient, self).__init__( - account_url, - container_name=container_name, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 16 - :caption: Creating a container to store blobs. 
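A minimal sketch of constructing a ContainerClient directly and creating the container, as documented above (placeholder URL, name, and credential)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient

    async def main():
        container = ContainerClient(
            "https://myaccount.blob.core.windows.net",   # placeholder account URL
            container_name="mycontainer",                # placeholder container name
            credential="<account-key>",                  # placeholder credential
        )
        async with container:
            # Fails with ResourceExistsError if the container already exists.
            await container.create_container(metadata={"category": "test"})

    asyncio.run(main())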
- """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - timeout = kwargs.pop('timeout', None) - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return await self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 16 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - await self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. 
The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_properties(self, **kwargs): - # type: (**Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the container. 
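A minimal sketch of acquire_lease on a container, as documented above (placeholders throughout; the lease is released at the end so the container is not left locked)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient

    async def main():
        container = ContainerClient(
            "https://myaccount.blob.core.windows.net",   # placeholder account URL
            container_name="mycontainer",
            credential="<account-key>",                  # placeholder credential
        )
        async with container:
            lease = await container.acquire_lease(lease_duration=15)   # 15-second lease
            props = await container.get_container_properties(lease=lease)
            print(props.last_modified)
            await lease.release()

    asyncio.run(main())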
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace_async - async def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 16 - :caption: Getting the access policy on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs # type: Any - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 16 - :caption: Setting access policy on the container. - """ - timeout = kwargs.pop('timeout', None) - lease = kwargs.pop('lease', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - try: - return await self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 12 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged - ) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
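A minimal sketch of the lazy ``list_blobs`` pager documented above (container name and prefix are placeholders); note the method itself is not awaited, only the async iteration is::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient  # assumed path

    async def list_reports():
        async with ContainerClient.from_connection_string(
                "<connection-string>", container_name="mycontainer") as container:
            # The pager follows continuation tokens lazily; 'metadata' asks the
            # service to include blob metadata with each BlobProperties item.
            async for blob in container.list_blobs(
                    name_starts_with="reports/", include=["metadata"]):
                print(blob.name, blob.size)

    asyncio.run(list_reports())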
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace_async - async def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
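For the hierarchical listing described here, a hedged sketch under the same assumed names::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient  # assumed path

    async def walk_top_level():
        async with ContainerClient.from_connection_string(
                "<connection-string>", container_name="mycontainer") as container:
            # With a delimiter, virtual "directories" are returned as BlobPrefix
            # placeholders while blobs at this level are BlobProperties.
            async for item in container.walk_blobs(delimiter="/"):
                print(item.name)

    asyncio.run(walk_top_level())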
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 12 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - await blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace_async - async def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a Lease object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await blob.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
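The ``upload_blob`` and ``delete_blob`` docstrings above pair naturally; an illustrative round trip (blob name and payload are placeholders)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient  # assumed path

    async def upload_and_remove():
        async with ContainerClient.from_connection_string(
                "<connection-string>", container_name="mycontainer") as container:
            # upload_blob chunks large payloads automatically and returns a
            # BlobClient for the new blob; with overwrite=False (the default)
            # an existing blob raises ResourceExistsError instead.
            blob = await container.upload_blob(
                "data/sample.txt", b"hello world", overwrite=True)
            print(blob.url)
            # Any snapshots must be removed together with the blob.
            await container.delete_blob("data/sample.txt", delete_snapshots="include")

    asyncio.run(upload_and_remove())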
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object. (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return await blob_client.download_blob( - offset=offset, - length=length, - **kwargs) - - @distributed_trace_async - async def delete_blobs( # pylint: disable=arguments-differ - self, *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
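The ``download_blob`` call documented just above returns a ``StorageStreamDownloader``; a small sketch of reading it back as text, with the same assumed client setup::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient  # assumed path

    async def fetch_text():
        async with ContainerClient.from_connection_string(
                "<connection-string>", container_name="mycontainer") as container:
            # readall() pulls the remaining chunks and returns bytes, or str
            # when an encoding is supplied on the download call.
            downloader = await container.download_blob("data/sample.txt", encoding="UTF-8")
            text = await downloader.readall()
            print(text)

    asyncio.run(fetch_text())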
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 12 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_standard_blob_tier_blobs( - self, - standard_blob_tier: Union[str, 'StandardBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
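For the batched ``delete_blobs`` operation described above, a hedged sketch (blob names are placeholders; the responses object is an async iterator of sub-request responses)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient  # assumed path

    async def purge_blobs():
        async with ContainerClient.from_connection_string(
                "<connection-string>", container_name="mycontainer") as container:
            # One batch request deletes all three blobs; with
            # raise_on_any_failure=False the per-blob responses are inspected instead.
            responses = await container.delete_blobs(
                "logs/a.txt", "logs/b.txt", "logs/c.txt",
                raise_on_any_failure=False)
            async for response in responses:
                print(response.status_code)

    asyncio.run(purge_blobs())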
- :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set on all blobs to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[BlobProperties, str] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 12 - :caption: Get the blob client. 
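Combining the tier-setting batch call and ``get_blob_client`` described above, again only as a sketch under the same assumed names::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient  # assumed path

    async def archive_old_blobs():
        async with ContainerClient.from_connection_string(
                "<connection-string>", container_name="mycontainer") as container:
            # A single batch request moves both block blobs to the Archive tier.
            await container.set_standard_blob_tier_blobs(
                "Archive", "logs/2019.csv", "logs/2020.csv")
            # get_blob_client is purely local: the blob does not need to exist yet.
            blob = container.get_blob_client("logs/2021.csv")
            print(blob.url)

    asyncio.run(archive_old_blobs())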
- """ - blob_name = _get_blob_name(blob) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_download_async.py deleted file mode 100644 index c698cb4..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_download_async.py +++ /dev/null @@ -1,491 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from azure.core.exceptions import HttpResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._deserialize import get_page_ranges_result -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += 
length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. - if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = response.properties.etag - - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - try: - chunk = next(self._iter_chunks) - except StopIteration: - raise StopAsyncIteration("Download complete") - self._current_content = await self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the blob. 
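The ``StorageStreamDownloader`` described in this class docstring exposes ``chunks()``, ``readall()``, and ``readinto()`` (defined further below in the deleted file); a sketch of bounded-memory chunked iteration, with placeholder names::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient  # assumed path

    async def stream_in_chunks():
        async with ContainerClient.from_connection_string(
                "<connection-string>", container_name="mycontainer") as container:
            downloader = await container.download_blob("data/large.bin")
            total = 0
            # chunks() yields the already-downloaded first range, then fetches
            # the remaining ranges lazily, so memory use stays bounded.
            async for chunk in downloader.chunks():
                total += len(chunk)
            print(total, downloader.size)

    asyncio.run(stream_in_chunks())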
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = await self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overriden by the user by specifying '*' - if self._request_options.get('modified_access_conditions'): - if not self._request_options['modified_access_conditions'].if_match: - self._request_options['modified_access_conditions'].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - """Iterate over chunks in the download stream. - - :rtype: Iterable[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - async def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. 
- :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. 
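Since ``download_to_stream`` is deprecated in favour of ``readinto`` (per the docstrings above), a hedged sketch of the preferred path; the local file name and blob name are placeholders::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient  # assumed path

    async def download_to_file():
        async with ContainerClient.from_connection_string(
                "<connection-string>", container_name="mycontainer") as container:
            # The target stream must be seekable when max_concurrency > 1.
            downloader = await container.download_blob("data/large.bin", max_concurrency=4)
            with open("large.bin", "wb") as handle:
                bytes_read = await downloader.readinto(handle)
            print(bytes_read)

    asyncio.run(download_to_file())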
- :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_lease_async.py deleted file mode 100644 index 91bf93d..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_lease_async.py +++ /dev/null @@ -1,327 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._generated.models import ( - StorageErrorException, - LeaseAccessConditions) -from .._serialize import get_modify_conditions -from .._lease import BlobLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - from .._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(LeaseClientBase): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.aio.BlobClient or - ~azure.storage.blob.aio.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
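The ``BlobLeaseClient`` acquire/renew/release cycle documented above, sketched against a blob (import path, connection string, and blob name are assumptions; a ``ContainerClient`` can be leased the same way)::

    import asyncio
    from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient, BlobLeaseClient  # assumed path

    async def hold_lease():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer",
                blob_name="data/sample.txt") as blob:
            # Acquire a 15-second lease, renew it once, then release it so
            # other clients can write to the blob again.
            lease = BlobLeaseClient(blob)
            await lease.acquire(lease_duration=15)
            await lease.renew()
            await lease.release()

    asyncio.run(hold_lease())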
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. "\"tagname\"='my tag'" - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_list_blobs_helper.py deleted file mode 100644 index dc09846..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_list_blobs_helper.py +++ /dev/null @@ -1,162 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged -from .._deserialize import get_blob_properties_from_generated_code -from .._models import BlobProperties -from .._generated.models import StorageErrorException, BlobItemInternal, BlobPrefix as GenBlobPrefix -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The container that the blobs are listed from. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
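``BlobPropertiesPaged`` is the pager behind ``ContainerClient.list_blobs``. A usage sketch against the upstream async client; the container name and prefix are illustrative::

    from azure.storage.blob.aio import ContainerClient

    async def blob_names(conn_str: str) -> list:
        async with ContainerClient.from_connection_string(conn_str, "mycontainer") as container:
            # list_blobs() returns an AsyncItemPaged of BlobProperties
            return [blob.name async for blob in container.list_blobs(name_starts_with="logs/")]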
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefix(AsyncItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token of the current page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - async def _extract_data_cb(self, get_next_return): - continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_models.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_models.py deleted file mode 100644 index 44d5d63..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_models.py +++ /dev/null @@ -1,141 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator - -from .._models import ContainerProperties, FilteredBlob -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - -from .._generated.models import StorageErrorException -from .._generated.models import FilterBlobItem - - -class ContainerPropertiesPaged(AsyncPageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class FilteredBlobPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - blob = FilteredBlob(name=item.name, container_name=item.container_name, tag_value=item.tag_value) # pylint: disable=protected-access - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_02_10/aio/_upload_helpers.py deleted file mode 100644 index 3a495b5..0000000 --- a/azure/multiapi/storagev2/blob/v2020_02_10/aio/_upload_helpers.py +++ /dev/null @@ -1,266 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceModifiedError - -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from .._shared.uploads_async import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from .._shared.encryption import generate_blob_encryption_data, encrypt_blob -from .._generated.models import ( - StorageErrorException, - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) -from .._upload_helpers import _convert_mod_error, _any_conditions - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - - -async def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return await client.upload( - data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = await upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs - ) - else: - block_ids = await upload_substream_blocks( - service=client, - 
uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return await client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. " - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = await client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return await upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - **kwargs) - - except StorageErrorException as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - 
stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/__init__.py deleted file mode 100644 index 9164961..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/__init__.py +++ /dev/null @@ -1,233 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import os - -from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import -from ._version import VERSION -from ._blob_client import BlobClient -from ._container_client import ContainerClient -from ._blob_service_client import BlobServiceClient -from ._lease import BlobLeaseClient -from ._download import StorageStreamDownloader -from ._quick_query_helper import BlobQueryReader -from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.response_handlers import PartialBatchErrorException -from ._shared.models import( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode, - UserDelegationKey -) -from ._generated.models import ( - RehydratePriority -) -from ._models import ( - BlobType, - BlockState, - StandardBlobTier, - PremiumPageBlobTier, - SequenceNumberAction, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - ContainerProperties, - BlobProperties, - FilteredBlob, - LeaseProperties, - ContentSettings, - CopyProperties, - BlobBlock, - PageRange, - AccessPolicy, - ContainerSasPermissions, - BlobSasPermissions, - CustomerProvidedEncryptionKey, - ContainerEncryptionScope, - BlobQueryError, - DelimitedJsonDialect, - DelimitedTextDialect, - ArrowDialect, - ArrowType, - ObjectReplicationPolicy, - ObjectReplicationRule -) -from ._list_blobs_helper import BlobPrefix - -__version__ = VERSION - - -def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> Dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. 
This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = client.download_blob(**kwargs) - stream.readinto(handle) - - -def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream. - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. 
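Both module-level helpers wrap a short-lived ``BlobClient``; a sketch of round-tripping a small file through a SAS URL (upstream sync package, the account URL and SAS token are placeholders)::

    from azure.storage.blob import upload_blob_to_url, download_blob_from_url

    sas_url = "https://myaccount.blob.core.windows.net/mycontainer/report.csv?<sas-token>"

    upload_blob_to_url(sas_url, b"id,total\n1,42\n", overwrite=True)
    download_blob_from_url(sas_url, "report.csv", overwrite=True)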
The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobType', - 'BlobLeaseClient', - 'StorageErrorCode', - 'UserDelegationKey', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'BlockState', - 'StandardBlobTier', - 'PremiumPageBlobTier', - 'SequenceNumberAction', - 'PublicAccess', - 'BlobAnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'ContainerProperties', - 'BlobProperties', - 'BlobPrefix', - 'FilteredBlob', - 'LeaseProperties', - 'ContentSettings', - 'CopyProperties', - 'BlobBlock', - 'PageRange', - 'AccessPolicy', - 'ContainerSasPermissions', - 'BlobSasPermissions', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageStreamDownloader', - 'CustomerProvidedEncryptionKey', - 'RehydratePriority', - 'generate_account_sas', - 'generate_container_sas', - 'generate_blob_sas', - 'PartialBatchErrorException', - 'ContainerEncryptionScope', - 'BlobQueryError', - 'DelimitedJsonDialect', - 'DelimitedTextDialect', - 'ArrowDialect', - 'ArrowType', - 'BlobQueryReader', - 'ObjectReplicationPolicy', - 'ObjectReplicationRule' -] diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_blob_client.py b/azure/multiapi/storagev2/blob/v2020_04_08/_blob_client.py deleted file mode 100644 index f3d2d16..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_blob_client.py +++ /dev/null @@ -1,3743 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines,no-self-use -from functools import partial -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.tracing.decorator import distributed_trace -from azure.core.exceptions import ResourceNotFoundError, HttpResponseError - -from ._shared import encode_base64 -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.encryption import generate_blob_encryption_data -from ._shared.uploads import IterStreamer -from ._shared.request_handlers import ( - add_metadata_headers, get_length, read_length, - validate_and_format_range_headers) -from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized -from ._generated import AzureBlobStorage -from ._generated.models import ( # pylint: disable=unused-import - DeleteSnapshotsOptionType, - BlobHTTPHeaders, - BlockLookupList, - AppendPositionAccessConditions, - SequenceNumberAccessConditions, - QueryRequest, - CpkInfo) -from ._serialize import ( - get_modify_conditions, - get_source_conditions, - get_cpk_scope_info, - get_api_version, - serialize_blob_tags_header, - serialize_blob_tags, - serialize_query_format, get_access_conditions -) -from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags, \ - deserialize_pipeline_response_into_cls -from ._quick_query_helper import BlobQueryReader -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob, _any_conditions) -from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError -from ._download import StorageStreamDownloader -from ._lease import BlobLeaseClient - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.models import BlockList - from ._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. 
The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - if not (container_name and blob_name): - raise ValueError("Please specify a container name and blob name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - path_snapshot, sas_token = parse_query(parsed_url.query) - - self.container_name = container_name - self.blob_name = blob_name - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) - super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - quote(self.blob_name, safe='~/'), - self._query_str) - - def _encode_source_url(self, source_url): - parsed_source_url = urlparse(source_url) - source_scheme = parsed_source_url.scheme - source_hostname = parsed_source_url.netloc.rstrip('/') - source_path = unquote(parsed_source_url.path) - source_query = parsed_source_url.query - result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))] - if source_query: - result.append(source_query) - return '?'.join(result) - - @classmethod - def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): - # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient - """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name. - - :param str blob_url: - The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type blob_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. If specified, this will override - the snapshot in the url. - :returns: A Blob client. 
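``from_blob_url`` splits the container and blob names out of a full endpoint URL; a sketch with an illustrative SAS URL (upstream sync client)::

    from azure.storage.blob import BlobClient

    client = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/archive/2021.log?<sas-token>"
    )
    print(client.container_name)   # "mycontainer"
    print(client.blob_name)        # "archive/2021.log"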
- :rtype: ~azure.storage.blob.BlobClient - """ - try: - if not blob_url.lower().startswith('http'): - blob_url = "https://" + blob_url - except AttributeError: - raise ValueError("Blob URL must be a string.") - parsed_url = urlparse(blob_url.rstrip('/')) - - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(blob_url)) - - account_path = "" - if ".core." in parsed_url.netloc: - # .core. is indicating non-customized url. Blob name with directory info can also be parsed. - path_blob = parsed_url.path.lstrip('/').split('/', 1) - elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: - path_blob = parsed_url.path.lstrip('/').split('/', 2) - account_path += '/' + path_blob[0] - else: - # for customized url. blob name that has directory info cannot be parsed. - path_blob = parsed_url.path.lstrip('/').split('/') - if len(path_blob) > 2: - account_path = "/" + "/".join(path_blob[:-2]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) - if not container_name or not blob_name: - raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.") - - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=path_snapshot, credential=credential, **kwargs - ) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobClient - """Create BlobClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_blob] - :end-before: [END auth_from_connection_string_blob] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a connection string. 
- """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=snapshot, credential=credential, **kwargs - ) - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_blob_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - encryption_options = { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function, - } - if self.key_encryption_key is not None: - cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) - encryption_options['cek'] = cek - encryption_options['vector'] = iv - encryption_options['data'] = encryption_data - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - overwrite = kwargs.pop('overwrite', False) - max_concurrency = kwargs.pop('max_concurrency', 1) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - kwargs['cpk_info'] = cpk_info - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) - kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) - if content_settings: - kwargs['blob_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - 
blob_content_disposition=content_settings.content_disposition - ) - kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['overwrite'] = overwrite - kwargs['headers'] = headers - kwargs['validate_content'] = validate_content - kwargs['blob_settings'] = self._config - kwargs['max_concurrency'] = max_concurrency - kwargs['encryption_options'] = encryption_options - if blob_type == BlobType.BlockBlob: - kwargs['client'] = self._client.block_blob - kwargs['data'] = data - elif blob_type == BlobType.PageBlob: - kwargs['client'] = self._client.page_blob - elif blob_type == BlobType.AppendBlob: - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - kwargs['client'] = self._client.append_blob - else: - raise ValueError("Unsupported BlobType: {}".format(blob_type)) - return kwargs - - def _upload_blob_from_url_options(self, source_url, **kwargs): - # type: (...) -> Dict[str, Any] - tier = kwargs.pop('standard_blob_tier', None) - overwrite = kwargs.pop('overwrite', False) - content_settings = kwargs.pop('content_settings', None) - if content_settings: - kwargs['blob_http_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'copy_source_blob_properties': kwargs.pop('include_source_blob_properties', True), - 'source_content_md5': kwargs.pop('source_content_md5', None), - 'copy_source': source_url, - 'modified_access_conditions': get_modify_conditions(kwargs), - 'blob_tags_string': serialize_blob_tags_header(kwargs.pop('tags', None)), - 'cls': return_response_headers, - 'lease_access_conditions': get_access_conditions(kwargs.pop('destination_lease', None)), - 'tier': tier.value if tier else None, - 'source_modified_access_conditions': get_source_conditions(kwargs), - 'cpk_info': cpk_info, - 'cpk_scope_info': get_cpk_scope_info(kwargs) - } - options.update(kwargs) - if not overwrite and not _any_conditions(**options): # pylint: disable=protected-access - options['modified_access_conditions'].if_none_match = '*' - return options - - @distributed_trace - def upload_blob_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Dict[str, Any] - """ - Creates a new Block Blob where the content of the blob is read from a given URL. - The content of an existing blob is overwritten with the new blob. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. - :keyword bool include_source_blob_properties: - Indicates if properties from the source blob should be copied. Defaults to True. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :paramtype tags: dict(str, str) - :keyword bytearray source_content_md5: - Specify the md5 that is used to verify the integrity of the source bytes. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. 
If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - """ - options = self._upload_blob_from_url_options( - source_url=self._encode_source_url(source_url), - **kwargs) - try: - return self._client.block_blob.put_blob_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_blob( # pylint: disable=too-many-locals - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. 
- :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 12 - :caption: Upload a blob to the container. - """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return upload_page_blob(**options) - return upload_append_blob(**options) - - def _download_blob_options(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Service actually uses an end-range inclusive index - - validate_content = kwargs.pop('validate_content', False) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'clients': self._client, - 'config': self._config, - 'start_range': offset, - 'end_range': length, - 'version_id': kwargs.pop('version_id', None), - 'validate_content': validate_content, - 'encryption_options': { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function}, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': kwargs.pop('cls', None) or deserialize_blob_stream, - 'max_concurrency':kwargs.pop('max_concurrency', 1), - 'encoding': kwargs.pop('encoding', None), - 'timeout': kwargs.pop('timeout', None), - 'name': self.blob_name, - 'container': self.container_name} - options.update(kwargs) - return options - - @distributed_trace - def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. 
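As a minimal sketch of the download flow described above, assuming the same public client surface as the upstream ``azure.storage.blob`` package (the import path, connection string, container and blob names, and output file name below are placeholders, not values from this repository):

.. code-block:: python

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")

    # Read the whole blob into memory with readall() ...
    data = blob.download_blob().readall()

    # ... or stream it into a local file with readinto().
    with open("myblob.out", "wb") as handle:
        blob.download_blob(max_concurrency=4).readinto(handle)
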
- - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 12 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - return StorageStreamDownloader(**options) - - def _quick_query_options(self, query_expression, - **kwargs): - # type: (str, **Any) -> Dict[str, Any] - delimiter = '\n' - input_format = kwargs.pop('blob_format', None) - if input_format: - try: - delimiter = input_format.lineterminator - except AttributeError: - try: - delimiter = input_format.delimiter - except AttributeError: - raise ValueError("The Type of blob_format can only be DelimitedTextDialect or DelimitedJsonDialect") - output_format = kwargs.pop('output_format', None) - if output_format: - try: - delimiter = output_format.lineterminator - except AttributeError: - try: - delimiter = output_format.delimiter - except AttributeError: - pass - else: - output_format = input_format - query_request = QueryRequest( - expression=query_expression, - input_serialization=serialize_query_format(input_format), - output_serialization=serialize_query_format(output_format) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo( - encryption_key=cpk.key_value, - encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm - ) - options = { - 'query_request': query_request, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'snapshot': self.snapshot, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized, - } - options.update(kwargs) - return options, delimiter - - @distributed_trace - def query_blob(self, query_expression, **kwargs): - # type: (str, **Any) -> BlobQueryReader - """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions. - This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - :keyword Callable[~azure.storage.blob.BlobQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword blob_format: - Optional. Defines the serialization of the data currently stored in the blob. The default is to - treat the blob data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. - :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the blob. By providing an output format, the blob data will be reformatted - according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. - :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect, ~azure.storage.blob.DelimitedJsonDialect - or list[~azure.storage.blob.ArrowDialect] - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (BlobQueryReader) - :rtype: ~azure.storage.blob.BlobQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on blob/or blob snapshot data by providing simple query expressions. 
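A minimal quick-query sketch for the ``query_blob`` API documented above, assuming the same public surface as the upstream ``azure.storage.blob`` package; the connection string, container and blob names, and the ``Name``/``Price`` columns are placeholders:

.. code-block:: python

    from azure.storage.blob import BlobClient, DelimitedTextDialect

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="data.csv")

    # Treat the stored data as headered CSV and project two columns.
    input_format = DelimitedTextDialect(delimiter=",", quotechar='"', has_header=True)
    reader = blob.query_blob(
        "SELECT Name, Price FROM BlobStorage", blob_format=input_format)
    print(reader.readall())
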
- """ - errors = kwargs.pop("on_error", None) - error_cls = kwargs.pop("error_cls", BlobQueryError) - encoding = kwargs.pop("encoding", None) - options, delimiter = self._quick_query_options(query_expression, **kwargs) - try: - headers, raw_response_body = self._client.blob.query(**options) - except HttpResponseError as error: - process_storage_error(error) - return BlobQueryReader( - name=self.blob_name, - container=self.container_name, - errors=errors, - record_delimiter=delimiter, - encoding=encoding, - headers=headers, - response=raw_response_body, - error_cls=error_cls) - - @staticmethod - def _generic_delete_blob_options(delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if delete_snapshots: - delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) - options = { - 'timeout': kwargs.pop('timeout', None), - 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs - 'delete_snapshots': delete_snapshots or None, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions} - options.update(kwargs) - return options - - def _delete_blob_options(self, delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - if self.snapshot and delete_snapshots: - raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") - options = self._generic_delete_blob_options(delete_snapshots, **kwargs) - options['snapshot'] = self.snapshot - options['version_id'] = kwargs.pop('version_id', None) - options['blob_delete_type'] = kwargs.pop('blob_delete_type', None) - return options - - @distributed_trace - def delete_blob(self, delete_snapshots=False, **kwargs): - # type: (str, **Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 12 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - self._client.blob.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def undelete_blob(self, **kwargs): - # type: (**Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 8 - :caption: Undeleting a blob. - """ - try: - self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace() - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :param str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :param int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def get_blob_properties(self, **kwargs): - # type: (**Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a blob. 
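A minimal sketch of reading blob properties with ``get_blob_properties``, assuming the same public surface as the upstream ``azure.storage.blob`` package; the connection string and names are placeholders:

.. code-block:: python

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")

    # Returns a BlobProperties object; content is not downloaded.
    props = blob.get_blob_properties()
    print(props.name, props.size, props.etag, props.last_modified)
    print(props.metadata)
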
- """ - # TODO: extract this out as _get_blob_properties_options - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - cls_method = kwargs.pop('cls', None) - if cls_method: - kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) - blob_props = self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - def _set_http_headers_options(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - options = { - 'timeout': kwargs.pop('timeout', None), - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return self._client.blob.set_http_headers(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _set_blob_metadata_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return self._client.blob.set_metadata(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_page_blob_options( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - sequence_number = kwargs.pop('sequence_number', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - if premium_page_blob_tier: - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore - - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_content_length': size, - 'blob_sequence_number': sequence_number, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return self._client.page_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.append_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_snapshot_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. 
- - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 8 - :caption: Create a snapshot of the blob. 
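A minimal sketch of taking a snapshot with ``create_snapshot`` and then opening a client pinned to it, assuming the same public surface as the upstream ``azure.storage.blob`` package; the connection string, names, and metadata are placeholders:

.. code-block:: python

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")

    # create_snapshot returns a property dict that includes the snapshot ID.
    snapshot_props = blob.create_snapshot(metadata={"reason": "backup"})

    # A client bound to that snapshot reads the point-in-time, read-only copy.
    snapshot_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob",
        snapshot=snapshot_props["snapshot"])
    archived = snapshot_client.download_blob().readall()
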
- """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return self._client.blob.create_snapshot(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - if 'source_lease' in kwargs: - source_lease = kwargs.pop('source_lease') - try: - headers['x-ms-source-lease-id'] = source_lease.id # type: str - except AttributeError: - headers['x-ms-source-lease-id'] = source_lease - - tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) - - if kwargs.get('requires_sync'): - headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync')) - - timeout = kwargs.pop('timeout', None) - dest_mod_conditions = get_modify_conditions(kwargs) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'copy_source': source_url, - 'seal_blob': kwargs.pop('seal_destination_blob', None), - 'timeout': timeout, - 'modified_access_conditions': dest_mod_conditions, - 'blob_tags_string': blob_tags_string, - 'headers': headers, - 'cls': return_response_headers, - } - if not incremental_copy: - source_mod_conditions = get_source_conditions(kwargs) - dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) - options['source_modified_access_conditions'] = source_mod_conditions - options['lease_access_conditions'] = dest_access_conditions - options['tier'] = tier.value if tier else None - options.update(kwargs) - return options - - @distributed_trace - def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. 
The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. 
- If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Copy a blob from a URL. 
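A minimal sketch of the copy-and-poll pattern described above, assuming a connection string environment variable and a placeholder source URL that is either public or carries a SAS token; start_copy_from_url returns a dict of copy properties, and progress is checked through get_blob_properties.

import os
import time
from azure.storage.blob import BlobClient

dest = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],
    container_name="mycontainer", blob_name="copied-blob")

source_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # placeholder source

copy = dest.start_copy_from_url(source_url)
print(copy["copy_id"], copy["copy_status"])

# Poll the destination blob until the service reports a terminal copy status.
props = dest.get_blob_properties()
while props.copy.status == "pending":
    time.sleep(5)
    props = dest.get_blob_properties()
print(props.copy.status)  # 'success', 'aborted' or 'failed'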
- """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return self._client.page_blob.copy_incremental(**options) - return self._client.blob.start_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _abort_copy_options(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - options = { - 'copy_id': copy_id, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID string, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - self._client.blob.abort_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace - def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - if self.snapshot and kwargs.get('version_id'): - raise ValueError("Snapshot and version_id cannot be set at the same time") - try: - self._client.blob.set_tier( - tier=standard_blob_tier, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def _stage_block_options( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_id = encode_base64(str(block_id)) - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'block_id': block_id, - 'content_length': length, - 'body': data, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return self._client.block_blob.stage_block(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _stage_block_from_url_options( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if source_length is not None and source_offset is None: - raise ValueError("Source offset value must not be None if length is set.") - if source_length is not None: - source_length = source_offset + source_length - 1 - block_id = encode_base64(str(block_id)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - range_header = None - if source_offset is not None: - range_header, _ = validate_and_format_range_headers(source_offset, source_length) - - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'block_id': block_id, - 'content_length': 0, - 'source_url': source_url, - 'source_range': range_header, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_from_url_options( - block_id, - source_url=self._encode_source_url(source_url), - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return self._client.block_blob.stage_block_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_block_list_result(self, blocks): - # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] - committed = [] # type: List - uncommitted = [] # type: List - if blocks.committed_blocks: - committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access - if blocks.uncommitted_blocks: - uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access - return committed, uncommitted - - @distributed_trace - def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - def _commit_block_list_options( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - for block in block_list: - try: - if block.state.value == 'committed': - block_lookup.committed.append(encode_base64(str(block.id))) - elif block.state.value == 'uncommitted': - block_lookup.uncommitted.append(encode_base64(str(block.id))) - else: - block_lookup.latest.append(encode_base64(str(block.id))) - except AttributeError: - block_lookup.latest.append(encode_base64(str(block))) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - blob_headers = None - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'blocks': block_lookup, - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'tier': tier.value if tier else None, - 'blob_tags_string': blob_tags_string, - 'headers': headers - } - options.update(kwargs) - return options - - @distributed_trace - def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of Blockblobs. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. 
versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.block_blob.commit_block_list(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. 
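A minimal sketch of the stage-and-commit pattern behind stage_block, get_block_list and commit_block_list above, assuming placeholder names and a connection string environment variable; the block IDs here are arbitrary, but must be unique and of equal length within a blob.

import os
import uuid
from azure.storage.blob import BlobBlock, BlobClient

blob = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],
    container_name="mycontainer", blob_name="staged-blob")

chunks = [b"hello ", b"block ", b"blob"]
block_ids = []

# Stage each chunk as an uncommitted block.
for chunk in chunks:
    block_id = uuid.uuid4().hex
    blob.stage_block(block_id=block_id, data=chunk, length=len(chunk))
    block_ids.append(block_id)

# The blob only becomes readable once the block list is committed in the desired order.
blob.commit_block_list([BlobBlock(block_id=bid) for bid in block_ids])

committed, uncommitted = blob.get_block_list("all")
print([b.id for b in committed], len(uncommitted))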
- - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def _set_blob_tags_options(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - tags = serialize_blob_tags(tags) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'tags': tags, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return self._client.blob.set_tags(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_blob_tags_options(self, **kwargs): - # type: (**Any) -> Dict[str, str] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'version_id': kwargs.pop('version_id', None), - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized} - return options - - @distributed_trace - def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except HttpResponseError as error: - process_storage_error(error) - - def _get_page_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Reformat to an inclusive range index - page_range, _ = validate_and_format_range_headers( - offset, length, start_range_required=False, end_range_required=False, align_to_page=True - ) - options = { - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': page_range} - if previous_snapshot_diff: - try: - options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore - except AttributeError: - try: - options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore - except TypeError: - options['prevsnapshot'] = previous_snapshot_diff - options.update(kwargs) - return options - - @distributed_trace - def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
-        :keyword str if_tags_match_condition:
-            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
-            eg. ``\"\\\"tagname\\\"='my tag'\"``
-
-            .. versionadded:: 12.4.0
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns:
-            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
-            The first list contains the filled page ranges, the second list contains the cleared page ranges.
-        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
-        """
-        options = self._get_page_ranges_options(
-            offset=offset,
-            length=length,
-            previous_snapshot_diff=previous_snapshot_diff,
-            **kwargs)
-        try:
-            if previous_snapshot_diff:
-                ranges = self._client.page_blob.get_page_ranges_diff(**options)
-            else:
-                ranges = self._client.page_blob.get_page_ranges(**options)
-        except HttpResponseError as error:
-            process_storage_error(error)
-        return get_page_ranges_result(ranges)
-
-    @distributed_trace
-    def get_page_range_diff_for_managed_disk(
-            self, previous_snapshot_url,  # type: str
-            offset=None,  # type: Optional[int]
-            length=None,  # type: Optional[int]
-            **kwargs
-        ):
-        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
-        """Returns the list of valid page ranges for a managed disk or snapshot.
-
-        .. note::
-            This operation is only available for managed disk accounts.
-
-        .. versionadded:: 12.2.0
-            This operation was introduced in API version '2019-07-07'.
-
-        :param str previous_snapshot_url:
-            Specifies the URL of a previous snapshot of the managed disk.
-            The response will only contain pages that were changed between the target blob and
-            its previous snapshot.
-        :param int offset:
-            Start of byte range to use for getting valid page ranges.
-            If no length is given, all bytes after the offset will be searched.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a multiple of 512 and the length must be a multiple of
-            512.
-        :param int length:
-            Number of bytes to use for getting valid page ranges.
-            If length is given, offset must be provided.
-            This range will return valid page ranges from the offset start up to
-            the specified length.
-            Pages must be aligned with 512-byte boundaries, the start offset
-            must be a multiple of 512 and the length must be a multiple of
-            512.
-        :keyword lease:
-            Required if the blob has an active lease. Value can be a BlobLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if sequence_number_action is None: - raise ValueError("A sequence number action must be specified") - options = { - 'sequence_number_action': sequence_number_action, - 'timeout': kwargs.pop('timeout', None), - 'blob_sequence_number': sequence_number, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return self._client.page_blob.update_sequence_number(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _resize_blob_options(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if size is None: - raise ValueError("A content length must be specified for a Page Blob.") - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'blob_content_length': size, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def resize_blob(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return self._client.page_blob.resize(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_page_options( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - if isinstance(page, six.text_type): - page = page.encode(kwargs.pop('encoding', 'UTF-8')) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': page[:length], - 'content_length': length, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. 
The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return self._client.page_blob.upload_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_pages_from_url_options( # type: ignore - self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # TODO: extract the code to a method format_range - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - if source_offset is None or offset % 512 != 0: - raise ValueError("source_offset must be an integer that aligns with 512 page size") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) # should subtract 1 here? - - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - source_content_md5 = kwargs.pop('source_content_md5', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). 
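        A minimal usage sketch of a single-page copy, assuming ``blob_client`` targets an existing page blob and ``source_blob_url`` is a placeholder for a readable source URL (public or carrying a SAS token)::

            # Copy one 512-byte page from the source blob into this page blob.
            blob_client.upload_pages_from_url(
                source_url=source_blob_url,
                offset=0,          # destination start, must be 512-byte aligned
                length=512,        # must be a multiple of 512
                source_offset=0,   # source start, must be 512-byte aligned
            )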
- :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _clear_page_options(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def clear_page(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. 
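        A minimal usage sketch, assuming ``blob_client`` targets an existing page blob::

            # Zero out the first page; offset and length must both be multiples of 512.
            blob_client.clear_page(offset=0, length=512)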
- :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return self._client.page_blob.clear_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _append_block_options( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if length == 0: - return {} - if isinstance(data, bytes): - data = data[:length] - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - validate_content = kwargs.pop('validate_content', False) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': data, - 'content_length': length, - 'timeout': kwargs.pop('timeout', None), - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return self._client.append_blob.append_block(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _append_block_from_url_options( # type: ignore - self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # If end range is provided, start range must be provided - if source_length is not None and source_offset is None: - raise ValueError("source_offset should also be specified if source_length is specified") - # Format based on whether length is present - source_range = None - if source_length is not None: - end_range = source_offset + source_length - 1 - source_range = 'bytes={0}-{1}'.format(source_offset, end_range) - elif source_offset is not None: - source_range = "bytes={0}-".format(source_offset) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - source_content_md5 = kwargs.pop('source_content_md5', None) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': copy_source_url, - 'content_length': 0, - 'source_range': source_range, - 'source_content_md5': source_content_md5, - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). 
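        A minimal usage sketch, assuming ``blob_client`` targets an existing append blob and ``source_blob_url`` is a placeholder for a readable source URL::

            # Append local bytes, capping the total blob size at 4 MiB.
            blob_client.append_block(b"log line\n", maxsize_condition=4 * 1024 * 1024)

            # Append the first 1024 bytes of a remote blob to this append blob.
            blob_client.append_block_from_url(source_blob_url, source_offset=0, source_length=1024)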
- :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return self._client.append_blob.append_block_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _seal_append_blob_options(self, **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - append_conditions = None - if appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
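        A minimal usage sketch, assuming ``blob_client`` targets an existing append blob::

            # After sealing, further append_block calls will be rejected by the service.
            blob_client.seal_append_blob()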
- :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return self._client.append_blob.seal(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2020_04_08/_blob_service_client.py deleted file mode 100644 index 8658363..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_blob_service_client.py +++ /dev/null @@ -1,697 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._shared.models import LocationMode -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.parser import _to_utc_datetime -from ._shared.response_handlers import return_response_headers, process_storage_error, \ - parse_to_internal_user_delegation_key -from ._generated import AzureBlobStorage -from ._generated.models import StorageServiceProperties, KeyInfo -from ._container_client import ContainerClient -from ._blob_client import BlobClient -from ._models import ContainerPropertiesPaged -from ._list_blobs_helper import FilteredBlobPaged -from ._serialize import get_api_version -from ._deserialize import service_stats_deserialize, service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from ._shared.models import UserDelegationKey - from ._lease import BlobLeaseClient - from ._models import ( - ContainerProperties, - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - FilteredBlob - ) - - -class BlobServiceClient(StorageAccountHostsMixin): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. 
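        A minimal construction sketch; the account URL and credential are placeholders, and the import shown is the upstream ``azure.storage.blob`` package::

            from azure.storage.blob import BlobServiceClient

            service = BlobServiceClient(
                account_url="https://<my-account>.blob.core.windows.net",
                credential="<account-key-or-sas-token>",
            )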
- :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self._query_str, credential = self._format_query_string(sas_token, credential) - super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobServiceClient - """Create BlobServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. 
This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob service client. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string] - :end-before: [END auth_from_connection_string] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 8 - :caption: Getting account information for the blob service. - """ - try: - return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_service_stats(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. 
In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 8 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 8 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. 
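        A minimal usage sketch, assuming ``service`` is an authenticated ``BlobServiceClient``; only the logging settings are changed here::

            from azure.storage.blob import BlobAnalyticsLogging, RetentionPolicy

            logging_settings = BlobAnalyticsLogging(
                read=True, write=True, delete=True,
                retention_policy=RetentionPolicy(enabled=True, days=5),
            )
            # Settings left as None (metrics, cors, ...) are preserved on the service.
            service.set_service_properties(analytics_logging=logging_settings)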
- :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 8 - :caption: Setting service properties for the blob service. - """ - if all(parameter is None for parameter in [ - analytics_logging, hour_metrics, minute_metrics, cors, - target_version, delete_retention_policy, static_website]): - raise ValueError("set_service_properties should be called with at least one parameter") - - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 12 - :caption: Listing the containers in the blob service. - """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> ItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace - def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 12 - :caption: Creating a container in the blob service. 
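        A short inline sketch, assuming ``service`` is an authenticated ``BlobServiceClient`` and the container name is a placeholder::

            container_client = service.create_container(
                "my-container",
                metadata={"Category": "test"},
                public_access="blob",   # anonymous read access to blobs only
            )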
- """ - container = self.get_container_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace - def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 12 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace - def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword str new_name: - The new name for the deleted container to be restored to. 
- If not specified deleted_container_name will be used as the restored container name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - container = self.get_container_client(new_name or deleted_container_name) - try: - container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except HttpResponseError as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 8 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 12 - :caption: Getting the blob client to interact with a specific blob. 
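        As a rough sketch (both calls only construct clients locally and make no request; ``service`` is an assumed, already-built BlobServiceClient and the names are placeholders)::

            container_client = service.get_container_client("demo-container")
            blob_client = service.get_blob_client("demo-container", "demo.txt")
            # The resulting clients expose the resource URL they were built for.
            print(container_client.url)
            print(blob_client.url)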
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_container_client.py b/azure/multiapi/storagev2/blob/v2020_04_08/_container_client.py deleted file mode 100644 index 8788d14..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_container_client.py +++ /dev/null @@ -1,1454 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import HttpRequest - -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from ._generated import AzureBlobStorage -from ._generated.models import SignedIdentifier -from ._deserialize import deserialize_container_properties -from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobType) -from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged -from ._lease import BlobLeaseClient -from ._blob_client import BlobClient - -if TYPE_CHECKING: - from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports - from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports - from datetime import datetime - from ._models import ( # pylint: disable=unused-import - PublicAccess, - AccessPolicy, - ContentSettings, - StandardBlobTier, - PremiumPageBlobTier) - - -def _get_blob_name(blob): - """Return the blob name. 
- - :param blob: A blob string or BlobProperties - :rtype: str - """ - try: - return blob.get('name') - except AttributeError: - return blob - - -class ContainerClient(StorageAccountHostsMixin): - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 8 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not container_name: - raise ValueError("Please specify a container name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self.container_name = container_name - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - self._query_str) - - @classmethod - def from_container_url(cls, container_url, credential=None, **kwargs): - # type: (str, Optional[Any], Any) -> ContainerClient - """Create ContainerClient from a container url. - - :param str container_url: - The full endpoint URL to the Container, including SAS token if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type container_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - """ - try: - if not container_url.lower().startswith('http'): - container_url = "https://" + container_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(container_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(container_url)) - - container_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(container_path) > 1: - account_path = "/" + "/".join(container_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name = unquote(container_path[-1]) - if not container_name: - raise ValueError("Invalid URL. Please provide a URL with a valid container name") - return cls(account_url, container_name=container_name, credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ContainerClient - """Create ContainerClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. 
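        For orientation, a hedged construction sketch; the connection string, account URL, SAS token, and container name below are all placeholders::

            from azure.storage.blob import ContainerClient

            container_client = ContainerClient.from_connection_string(
                "<connection-string>", container_name="demo-container")

            # Alternatively, from a full container URL, optionally carrying a SAS token.
            container_client = ContainerClient.from_container_url(
                "https://<account>.blob.core.windows.net/demo-container?<sas-token>")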
- :param container_name: - The container name for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_container] - :end-before: [END auth_from_connection_string_container] - :language: python - :dedent: 8 - :caption: Creating the ContainerClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, credential=credential, **kwargs) - - @distributed_trace - def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 12 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - timeout = kwargs.pop('timeout', None) - headers.update(add_metadata_headers(metadata)) # type: ignore - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 12 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
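        A small sketch of how the returned lease is typically threaded into later calls; ``container_client`` is an assumed, already-constructed ContainerClient::

            # An infinite lease (-1); finite leases must be between 15 and 60 seconds.
            lease = container_client.acquire_lease(lease_duration=-1)

            # Operations on the leased container must now present the lease.
            container_client.delete_container(lease=lease)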
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_container_properties(self, **kwargs): - # type: (Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace - def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the container. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 12 - :caption: Getting the access policy on the container. 
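        A brief sketch of reading the policy back after updating container metadata; ``container_client`` is an assumed, existing ContainerClient::

            container_client.set_container_metadata({"category": "test"})

            # Returns a dict with 'public_access' and 'signed_identifiers' keys.
            policy = container_client.get_container_access_policy()
            print(policy["public_access"], policy["signed_identifiers"])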
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 12 - :caption: Setting access policy on the container. - """ - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - lease = kwargs.pop('lease', None) - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 8 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
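        A hedged listing sketch; ``container_client`` is an assumed ContainerClient and ``logs/`` is a placeholder prefix::

            for blob in container_client.list_blobs(name_starts_with="logs/", include=["metadata"]):
                print(blob.name, blob.metadata)

            # walk_blobs groups results by the delimiter, yielding BlobPrefix entries
            # for each virtual "directory".
            for item in container_client.walk_blobs(delimiter="/"):
                print(item.name)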
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace - def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
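        A hedged upload sketch using placeholder names (``container_client``, the blob name, and the local path are illustrative, not taken from the referenced samples)::

            with open("data.csv", "rb") as source:
                blob_client = container_client.upload_blob(
                    "demo.csv", source, overwrite=True, metadata={"origin": "sketch"})
            print(blob_client.url)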
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 8 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace - def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - blob_client.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace - def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return blob_client.download_blob(offset=offset, length=length, **kwargs) - - def _generate_delete_blobs_subrequest_options( - self, snapshot=None, - delete_snapshots=None, - lease_access_conditions=None, - modified_access_conditions=None, - **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. - """ - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct parameters - timeout = kwargs.pop('timeout', None) - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access - "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access - "lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') 
# pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_delete_blobs_options(self, - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - delete_snapshots = kwargs.pop('delete_snapshots', None) - if_modified_since = kwargs.pop('if_modified_since', None) - if_unmodified_since = kwargs.pop('if_unmodified_since', None) - if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "" - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - snapshot=blob.get('snapshot'), - delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), - lease=blob.get('lease_id'), - if_modified_since=if_modified_since or blob.get('if_modified_since'), - if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), - etag=blob.get('etag'), - if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), - match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') - else None, - timeout=blob.get('timeout'), - ) - except AttributeError: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - delete_snapshots=delete_snapshots, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_tags_match_condition=if_tags_match_condition - ) - - query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) - - req = HttpRequest( - "DELETE", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def delete_blobs(self, *blobs, **kwargs): - # type: (...) -> Iterator[HttpResponse] - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 8 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def _generate_set_tiers_subrequest_options( - self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. 
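        For context on how these private subrequest helpers are exercised, a hedged sketch of the public batch deletion they support; ``container_client`` is an assumed ContainerClient and the blob names are placeholders::

            responses = container_client.delete_blobs(
                "old-1.txt", "old-2.txt", raise_on_any_failure=False)
            for response in responses:
                print(response.status_code)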
- """ - if not tier: - raise ValueError("A blob tier must be specified") - if snapshot and version_id: - raise ValueError("Snapshot and version_id cannot be set at the same time") - if_tags = kwargs.pop('if_tags', None) - - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - timeout = kwargs.pop('timeout', None) - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if version_id is not None: - query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access - "rehydrate_priority", rehydrate_priority, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_set_tiers_options(self, - blob_tier, # type: Optional[Union[str, StandardBlobTier, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - rehydrate_priority = kwargs.pop('rehydrate_priority', None) - if_tags = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "" - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - tier = blob_tier or blob.get('blob_tier') - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - tier=tier, - snapshot=blob.get('snapshot'), - version_id=blob.get('version_id'), - rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), - lease_access_conditions=blob.get('lease_id'), - if_tags=if_tags or blob.get('if_tags_match_condition'), - timeout=timeout or blob.get('timeout') - ) - except AttributeError: - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) - - req = HttpRequest( - "PUT", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def set_standard_blob_tier_blobs( - self, - standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]] - *blobs, # 
type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - snapshot: - key: "snapshost", value type: str - version id: - key: "version_id", value type: str - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - @distributed_trace - def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. 
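A comparable sketch for the batch tiering call documented above (string tier values are accepted alongside ``StandardBlobTier`` members; connection string and names are placeholders)::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string("<connection-string>", "my-container")

    # One tier applied to every blob in the batch.
    container.set_standard_blob_tier_blobs("Cool", "data-1.bin", "data-2.bin")

    # Pass None as the tier and supply 'blob_tier' per blob to mix tiers.
    container.set_standard_blob_tier_blobs(
        None,
        {"name": "data-3.bin", "blob_tier": "Archive"},
        {"name": "data-4.bin", "blob_tier": "Hot"},
    )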
- - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[str, BlobProperties] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 8 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_deserialize.py b/azure/multiapi/storagev2/blob/v2020_04_08/_deserialize.py deleted file mode 100644 index dff3953..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_deserialize.py +++ /dev/null @@ -1,166 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
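For the ``get_blob_client`` helper above, a short illustrative sketch; the blob does not need to exist yet, and all names are placeholders::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string("<connection-string>", "my-container")
    blob = container.get_blob_client("reports/2021/summary.csv")
    blob.upload_blob(b"col1,col2\n1,2\n", overwrite=True)
    print(blob.url)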
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties -from ._shared.models import get_enum_value - -from ._shared.response_handlers import deserialize_metadata -from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ - StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule - -if TYPE_CHECKING: - from ._generated.models import PageList - - -def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers): - try: - deserialized_response = response.http_response - except AttributeError: - deserialized_response = response - return cls_method(deserialized_response, obj, headers) - - -def deserialize_blob_properties(response, obj, headers): - blob_properties = BlobProperties( - metadata=deserialize_metadata(response, obj, headers), - object_replication_source_properties=deserialize_ors_policies(response.http_response.headers), - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - blob_properties.content_settings.content_md5 = None - return blob_properties - - -def deserialize_ors_policies(policy_dictionary): - - if policy_dictionary is None: - return None - # For source blobs (blobs that have policy ids and rule ids applied to them), - # the header will be formatted as "x-ms-or-_: {Complete, Failed}". - # The value of this header is the status of the replication. - or_policy_status_headers = {key: val for key, val in policy_dictionary.items() - if 'or-' in key and key != 'x-ms-or-policy-id'} - - parsed_result = {} - - for key, val in or_policy_status_headers.items(): - # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule - policy_and_rule_ids = key.split('or-')[1].split('_') - policy_id = policy_and_rule_ids[0] - rule_id = policy_and_rule_ids[1] - - # If we are seeing this policy for the first time, create a new list to store rule_id -> result - parsed_result[policy_id] = parsed_result.get(policy_id) or list() - parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val)) - - result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()] - - return result_list - - -def deserialize_blob_stream(response, obj, headers): - blob_properties = deserialize_blob_properties(response, obj, headers) - obj.properties = blob_properties - return response.http_response.location_mode, obj - - -def deserialize_container_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - container_properties = ContainerProperties( - metadata=metadata, - **headers - ) - return container_properties - - -def get_page_ranges_result(ranges): - # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - page_range = [] # type: ignore - clear_range = [] # type: List - if ranges.page_range: - page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore - if ranges.clear_range: - clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range] - return page_range, clear_range # type: ignore - - -def service_stats_deserialize(generated): - """Deserialize a ServiceStats objects into a dict. 
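The ``x-ms-or-<policy-id>_<rule-id>`` headers parsed by ``deserialize_ors_policies`` surface on blob properties as ``ObjectReplicationPolicy`` objects; a hedged reading sketch with placeholder names::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string("<connection-string>", "my-container", "source.txt")
    props = blob.get_blob_properties()
    for policy in props.object_replication_source_properties or []:
        for rule in policy.rules:
            print(policy.policy_id, rule.rule_id, rule.status)   # e.g. "... Complete"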
- """ - return { - 'geo_replication': { - 'status': generated.geo_replication.status, - 'last_sync_time': generated.geo_replication.last_sync_time, - } - } - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. - """ - return { - 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'target_version': generated.default_service_version, # pylint: disable=protected-access - 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access - 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access - } - - -def get_blob_properties_from_generated_code(generated): - blob = BlobProperties() - blob.name = generated.name - blob_type = get_enum_value(generated.properties.blob_type) - blob.blob_type = BlobType(blob_type) if blob_type else None - blob.etag = generated.properties.etag - blob.deleted = generated.deleted - blob.snapshot = generated.snapshot - blob.is_append_blob_sealed = generated.properties.is_sealed - blob.metadata = generated.metadata.additional_properties if generated.metadata else {} - blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None - blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access - blob.last_modified = generated.properties.last_modified - blob.creation_time = generated.properties.creation_time - blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access - blob.size = generated.properties.content_length - blob.page_blob_sequence_number = generated.properties.blob_sequence_number - blob.server_encrypted = generated.properties.server_encrypted - blob.encryption_scope = generated.properties.encryption_scope - blob.deleted_time = generated.properties.deleted_time - blob.remaining_retention_days = generated.properties.remaining_retention_days - blob.blob_tier = generated.properties.access_tier - blob.rehydrate_priority = generated.properties.rehydrate_priority - blob.blob_tier_inferred = generated.properties.access_tier_inferred - blob.archive_status = generated.properties.archive_status - blob.blob_tier_change_time = generated.properties.access_tier_change_time - blob.version_id = generated.version_id - blob.is_current_version = generated.is_current_version - blob.tag_count = generated.properties.tag_count - blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access - blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) - blob.last_accessed_on = generated.properties.last_accessed_on - return blob - - -def parse_tags(generated_tags): - # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] - """Deserialize a list of BlobTag objects into a dict. 
- """ - if generated_tags: - tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} - return tag_dict - return None diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_download.py b/azure/multiapi/storagev2/blob/v2020_04_08/_download.py deleted file mode 100644 index 46e59e5..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_download.py +++ /dev/null @@ -1,580 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range -from ._deserialize import get_page_ranges_result - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - non_empty_ranges=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - self.non_empty_ranges = non_empty_ranges - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the 
right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _do_optimize(self, given_range_start, given_range_end): - # If we have no page range list stored, then assume there's data everywhere for that page blob - # or it's a block blob or append blob - if self.non_empty_ranges is None: - return False - - for source_range in self.non_empty_ranges: - # Case 1: As the range list is sorted, if we've reached such a source_range - # we've checked all the appropriate source_range already and haven't found any overlapping. - # so the given range doesn't have any data and download optimization could be applied. - # given range: | | - # source range: | | - if given_range_end < source_range['start']: # pylint:disable=no-else-return - return True - # Case 2: the given range comes after source_range, continue checking. - # given range: | | - # source range: | | - elif source_range['end'] < given_range_start: - pass - # Case 3: source_range and given range overlap somehow, no need to optimize. - else: - return False - # Went through all src_ranges, but nothing overlapped. Optimization will be applied. - return True - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
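# Illustrative worked example: with
#   non_empty_ranges = [{'start': 0, 'end': 511}, {'start': 1024, 'end': 1535}]
# a chunk covering bytes 600-900 overlaps neither populated range, so
# _do_optimize returns True and the chunk below is produced locally as zero
# bytes instead of being requested from the service.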
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get("modified_access_conditions"): - self.request_options["modified_access_conditions"].if_match = response.properties.etag - - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - chunk = next(self._iter_chunks) - self._current_content = self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - next = __next__ # Python 2 compatibility. - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the blob. - """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. 
- # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. 
- try: - _, response = self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - # according to the REST API documentation: - # in a highly fragmented page blob with a large number of writes, - # a Get Page Ranges request can fail due to an internal server timeout. - # thus, if the page blob is not sparse, it's ok for it to fail - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overriden by the user by specifying '*' - if self._request_options.get("modified_access_conditions"): - if not self._request_options["modified_access_conditions"].if_match: - self._request_options["modified_access_conditions"].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. 
- :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/__init__.py deleted file mode 100644 index cc760e7..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
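Putting the pieces above together, ``download_blob`` on the public ``BlobClient`` returns the ``StorageStreamDownloader`` just described; a hedged usage sketch with placeholder names::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string("<connection-string>", "my-container", "report.csv")

    # Entire blob in memory as bytes.
    data = blob.download_blob().readall()

    # Stream into a file handle; with max_concurrency > 1 the chunk downloader
    # above fetches ranges in parallel, so the target stream must be seekable.
    with open("report.csv", "wb") as handle:
        blob.download_blob(max_concurrency=4).readinto(handle)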
-# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/_azure_blob_storage.py deleted file mode 100644 index dff7e12..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/_azure_blob_storage.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -from ._configuration import AzureBlobStorageConfiguration -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from . import models - - -class AzureBlobStorage(object): - """AzureBlobStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.blob.operations.ServiceOperations - :ivar container: ContainerOperations operations - :vartype container: azure.storage.blob.operations.ContainerOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.blob.operations.DirectoryOperations - :ivar blob: BlobOperations operations - :vartype blob: azure.storage.blob.operations.BlobOperations - :ivar page_blob: PageBlobOperations operations - :vartype page_blob: azure.storage.blob.operations.PageBlobOperations - :ivar append_blob: AppendBlobOperations operations - :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations - :ivar block_blob: BlockBlobOperations operations - :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) 
-> None - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureBlobStorage - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/_configuration.py deleted file mode 100644 index 6c37b24..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/_configuration.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-04-08" - kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/__init__.py deleted file mode 100644 index 12cfcf6..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/_azure_blob_storage.py deleted file mode 100644 index b537034..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/_azure_blob_storage.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from msrest import Deserializer, Serializer - -from ._configuration import AzureBlobStorageConfiguration -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from .. import models - - -class AzureBlobStorage(object): - """AzureBlobStorage. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.blob.aio.operations.ServiceOperations - :ivar container: ContainerOperations operations - :vartype container: azure.storage.blob.aio.operations.ContainerOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.blob.aio.operations.DirectoryOperations - :ivar blob: BlobOperations operations - :vartype blob: azure.storage.blob.aio.operations.BlobOperations - :ivar page_blob: PageBlobOperations operations - :vartype page_blob: azure.storage.blob.aio.operations.PageBlobOperations - :ivar append_blob: AppendBlobOperations operations - :vartype append_blob: azure.storage.blob.aio.operations.AppendBlobOperations - :ivar block_blob: BlockBlobOperations operations - :vartype block_blob: azure.storage.blob.aio.operations.BlockBlobOperations - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureBlobStorage": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/_configuration.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/_configuration.py deleted file mode 100644 index 5727357..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/_configuration.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage. 
- - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-04-08" - kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/__init__.py deleted file mode 100644 index 62f85c9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_append_blob_operations.py deleted file mode 100644 index 333cb9f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_append_blob_operations.py +++ /dev/null @@ -1,709 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class AppendBlobOperations: - """AppendBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - content_length: int, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. 
- :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "AppendBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = 
self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', 
response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def append_block( - self, - content_length: int, - body: IO, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Append Block operation commits a new block of data to the end of an existing append blob. - The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. 
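# Illustrative usage sketch (not part of the deleted generated file): the
# AppendBlobOperations.create call above is normally reached through the
# public azure.storage.blob async client rather than invoked directly.
# A minimal sketch, assuming a placeholder account URL, container/blob name,
# and credential:
from azure.storage.blob.aio import BlobClient

async def create_append_blob_example():
    blob = BlobClient(
        account_url="https://<account>.blob.core.windows.net",  # placeholder
        container_name="mycontainer",                           # placeholder
        blob_name="log.txt",                                     # placeholder
        credential="<account-key-or-sas>",                       # placeholder
    )
    async with blob:
        # Issues the PUT with x-ms-blob-type: AppendBlob built above.
        await blob.create_append_blob(metadata={"origin": "example"})

# e.g. asyncio.run(create_append_blob_example()) with real values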
- :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _max_size = None - _append_position = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "appendblock" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.append_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - 
header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - 
response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def append_block_from_url( - self, - source_url: str, - content_length: int, - source_range: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Append Block operation commits a new block of data to the end of an existing append blob - where the contents are read from a source url. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
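# Illustrative usage sketch (not part of the deleted generated file): the
# append_block operation above (PUT ?comp=appendblock) is exposed on the
# public async client as append_block(). Blob URL and SAS are placeholders.
from azure.storage.blob.aio import BlobClient

async def append_block_example():
    blob = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/mycontainer/log.txt?<sas>"  # placeholder
    )
    async with blob:
        # Appends one block to the end of an existing append blob.
        await blob.append_block(b"one line of data\n")

# e.g. asyncio.run(append_block_example()) with real values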
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _lease_id = None - _max_size = None - _append_position = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "appendblock" - accept = "application/xml" - - # Construct URL - url = self.append_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = 
self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def seal( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - **kwargs - ) -> None: - """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on - version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Parameter group. 
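# Illustrative usage sketch (not part of the deleted generated file): the
# append_block_from_url operation above copies a byte range from another blob
# server-side. A hedged sketch with placeholder URLs; the source must be
# readable by the service (for example via a SAS).
from azure.storage.blob.aio import BlobClient

async def append_from_url_example():
    dest = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/mycontainer/log.txt?<sas>"  # placeholder
    )
    async with dest:
        await dest.append_block_from_url(
            copy_source_url="https://<account>.blob.core.windows.net/src/chunk.bin?<sas>",  # placeholder
            source_offset=0,
            source_length=1024,
        )

# e.g. asyncio.run(append_from_url_example()) with real values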
- :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _append_position = None - if append_position_access_conditions is not None: - _append_position = append_position_access_conditions.append_position - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - comp = "seal" - accept = "application/xml" - - # Construct URL - url = self.seal.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_blob_operations.py deleted file mode 100644 index 687bcd3..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_blob_operations.py +++ /dev/null @@ -1,3135 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class BlobOperations: - """BlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
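# Illustrative usage sketch (not part of the deleted generated file): the seal
# operation defined earlier in this hunk (comp=seal, service version
# 2019-12-12 and later) is exposed on the public async client as
# seal_append_blob(). Blob URL is a placeholder.
from azure.storage.blob.aio import BlobClient

async def seal_example():
    blob = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/mycontainer/log.txt?<sas>"  # placeholder
    )
    async with blob:
        await blob.seal_append_blob()  # no further appends are accepted

# e.g. asyncio.run(seal_example()) with real values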
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def download( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - range_get_content_md5: Optional[bool] = None, - range_get_content_crc64: Optional[bool] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> IO: - """The Download operation reads or downloads a blob from the system, including its metadata and - properties. You can also call Download to read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param range_get_content_md5: When set to true and specified together with the Range, the - service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified together with the Range, the - service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 - MB in size. - :type range_get_content_crc64: bool - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match 
is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - 
response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - 
response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_properties( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. 
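# Illustrative usage sketch (not part of the deleted generated file): the
# download operation above streams blob content; the public async client
# exposes it as download_blob(). Blob URL is a placeholder.
from azure.storage.blob.aio import BlobClient

async def download_example():
    blob = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/mycontainer/log.txt?<sas>"  # placeholder
    )
    async with blob:
        downloader = await blob.download_blob()  # GET request built as above
        data = await downloader.readall()
        print(len(data), "bytes downloaded")

# e.g. asyncio.run(download_example()) with real values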
- :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) - response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - 
response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) - response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def delete( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, 
- timeout: Optional[int] = None, - delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, - request_id_parameter: Optional[str] = None, - blob_delete_type: Optional[str] = "Permanent", - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - permanently removed from the storage account. If the storage account's soft delete feature is - enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for the number of days - specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob- - Service-Properties.md). After the specified number of days has passed, the blob's data is - permanently removed from the storage account. Note that you continue to be charged for the - soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify - the "include=deleted" query parameter to discover which blobs and snapshots have been soft - deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other - operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code - of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the - following two options: include: Delete the base blob and all of its snapshots. only: Delete - only the blob's snapshots and not the blob itself. - :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to - permanently delete a blob if blob soft delete is enabled. - :type blob_delete_type: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if blob_delete_type is not None: - query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in 
[202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_access_control( - self, - timeout: Optional[int] = None, - upn: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def rename( - self, - rename_source: str, - timeout: Optional[int] = None, - path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - source_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Rename a blob/file. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. 
Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def undelete( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, 
response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_expiry( - self, - expiry_options: Union[str, "_models.BlobExpiryOptions"], - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - expires_on: Optional[str] = None, - **kwargs - ) -> None: - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. - :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/xml" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_http_headers( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_md5 = None - _blob_content_encoding = None - _blob_content_language = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _blob_content_disposition = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_disposition = blob_http_headers.blob_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or - more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] 
= self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", 
_if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
_if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - proposed_lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def create_snapshot( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def start_copy_from_url( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - seal_blob: Optional[bool] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Start Copy From URL operation copies a blob or an internet resource to a new blob. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. 
If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. Service version - 2019-12-12 and newer. - :type seal_blob: bool - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - accept = "application/xml" - - # Construct URL - url = self.start_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - 
query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def copy_from_url( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - blob_tags_string: Optional[str] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not - return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - x_ms_requires_sync = "true" - accept = "application/xml" - - # Construct URL - url = self.copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _if_modified_since is not None: - 
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def abort_copy_from_url( - self, - copy_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Abort Copy From URL operation 
aborts a pending Copy From URL operation, and leaves a - destination blob with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_tier( - self, - tier: Union[str, "_models.AccessTierRequired"], - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a - premium storage account and on a block blob in a blob storage account (locally redundant - storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not - update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tier" - accept = "application/xml" - - # Construct URL - url = self.set_tier.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if response.status_code == 202: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
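# Editor's sketch (hedged, not part of the deleted module): Set Tier answers with either
# 200 (tier applied immediately) or 202 (tier change pending, e.g. rehydration from Archive).
# `blob_ops` is assumed to be an instance of this operations class; names are illustrative.
async def _archive_then_rehydrate(blob_ops):
    await blob_ops.set_tier(tier="Archive")
    # Rehydration back to Hot/Cool is asynchronous; a priority hint may be supplied.
    await blob_ops.set_tier(tier="Hot", rehydrate_priority="High")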
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_account_info( - self, - **kwargs - ) -> None: - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def query( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - query_request: Optional["_models.QueryRequest"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> IO: - """The Query operation enables users to select/project on blob data by providing simple query - expressions. 
- - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param query_request: the query request. - :type query_request: ~azure.storage.blob.models.QueryRequest - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "query" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.query.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = 
self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', 
response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - 
response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_tags( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> "_models.BlobTags": - """The Get Tags operation enables users to get the tags associated with a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlobTags, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - accept = "application/xml" - - # Construct URL - url = self.get_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlobTags', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_tags( - self, - timeout: Optional[int] = None, - version_id: Optional[str] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - tags: Optional["_models.BlobTags"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param tags: Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_block_blob_operations.py deleted file mode 100644 index 67c90b0..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_block_blob_operations.py +++ /dev/null @@ -1,1103 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class BlockBlobOperations: - """BlockBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def upload( - self, - content_length: int, - body: IO, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - metadata: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Upload Block Blob operation updates the content of an existing block blob. Updating an - existing block blob overwrites any existing metadata on the blob. Partial updates are not - supported with Put Blob; the content of the existing blob is overwritten with the content of - the new blob. 
To perform a partial update of the content of a block blob, use the Put Block - List operation. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "BlockBlob" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - 
header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def put_blob_from_url( - self, - content_length: int, - copy_source: str, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - metadata: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - blob_tags_string: Optional[str] = None, - copy_source_blob_properties: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are - read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial - updates are not supported with Put Blob from URL; the content of an existing blob is - overwritten with the content of the new blob. To perform partial updates to a block blob’s - contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. - - :param content_length: The length of the request. - :type content_length: long - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. 
If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param copy_source_blob_properties: Optional, default is true. Indicates if properties from - the source blob should be copied. - :type copy_source_blob_properties: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - blob_type = "BlockBlob" - accept = "application/xml" - - # Construct URL - url = self.put_blob_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", 
content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - 
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if copy_source_blob_properties is not None: - header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def stage_block( - self, - block_id: str, - content_length: int, - body: IO, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - **kwargs - ) -> None: - """The Stage Block operation creates a new block to be committed as part of a blob. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. 
For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "block" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.stage_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = 
self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def stage_block_from_url( - self, - block_id: str, - content_length: int, - source_url: str, - source_range: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: 
Optional["_models.LeaseAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Stage Block operation creates a new block to be committed as part of a blob where the - contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _lease_id = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "block" - accept = "application/xml" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", _lease_id, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def commit_block_list( - self, - blocks: "_models.BlockLookupList", - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - metadata: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - 
) -> None: - """The Commit Block List operation writes a blob by specifying the list of block IDs that make up - the blob. In order to be written as part of a blob, a block must have been successfully written - to the server in a prior Put Block operation. You can call Put Block List to update a blob by - uploading only those blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from the committed block list - or from the uncommitted block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.commit_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if transactional_content_md5 is 
not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_block_list( - self, - snapshot: Optional[str] = None, - list_type: Union[str, "_models.BlockListType"] = "committed", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> "_models.BlockList": - """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a - block blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param list_type: Specifies whether to return the list of committed blocks, the list of - uncommitted blocks, or both lists together. - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
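The Get Block List operation being defined here is surfaced on the public client as get_block_list. A minimal sketch, with placeholder names, that prints the committed and uncommitted block IDs:

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="big.bin"
    )
    # Returns a (committed, uncommitted) tuple of BlobBlock lists.
    committed, uncommitted = blob.get_block_list(block_list_type="all")
    print([b.id for b in committed], [b.id for b in uncommitted])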
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlockList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - accept = "application/xml" - - # Construct URL - url = self.get_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = 
self._deserialize('BlockList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_container_operations.py deleted file mode 100644 index ed32bc9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_container_operations.py +++ /dev/null @@ -1,1463 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ContainerOperations: - """ContainerOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - access: Optional[Union[str, "_models.PublicAccessType"]] = None, - request_id_parameter: Optional[str] = None, - container_cpk_scope_info: Optional["_models.ContainerCpkScopeInfo"] = None, - **kwargs - ) -> None: - """creates a new container under the specified account. If the container with the same name - already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_cpk_scope_info: Parameter group. - :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _default_encryption_scope = None - _prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - _default_encryption_scope = container_cpk_scope_info.default_encryption_scope - _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') - if _prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """returns all user-defined metadata and system properties for the specified container. The data - returned does not include the container's list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) - response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) - response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) - response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """operation marks the specified container for deletion. The container and any blobs contained - within it are later deleted during garbage collection. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
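The container Create and Get Properties operations above, and the Delete operation being defined here, correspond to methods on the public ContainerClient. A minimal sketch of that lifecycle, assuming a placeholder connection string and container name:

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")
    container.create_container(metadata={"project": "demo"})
    props = container.get_container_properties()
    print(props.last_modified, props.metadata)
    # Marks the container for deletion; blobs inside it are garbage-collected later.
    container.delete_container()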
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = 
None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """operation sets one or more user-defined name-value pairs for the specified container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - restype = "container" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", 
request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_access_policy( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> List["_models.SignedIdentifier"]: - """gets the permissions for the specified container. The permissions indicate whether container - data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
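Set Container Metadata, just shown, maps to ContainerClient.set_container_metadata on the public client; note that it replaces all existing metadata rather than merging with it. A minimal sketch with placeholder values:

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")
    # Replaces any metadata already on the container with exactly these pairs.
    container.set_container_metadata(metadata={"owner": "data-team", "env": "test"})
    print(container.get_container_properties().metadata)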
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - async def set_access_policy( - self, - timeout: Optional[int] = None, - access: Optional[Union[str, "_models.PublicAccessType"]] = None, - request_id_parameter: 
Optional[str] = None, - container_acl: Optional[List["_models.SignedIdentifier"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """sets the permissions for the specified container. The permissions indicate whether blobs in a - container may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_acl: the acls for the container. - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - 
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - async def restore( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - deleted_container_name: Optional[str] = None, - deleted_container_version: Optional[str] = None, - **kwargs - ) -> None: - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of - the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the - version of the deleted container to restore. 
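
The ``get_access_policy`` and ``set_access_policy`` operations above are the generated async wrappers around the ``restype=container&comp=acl`` REST calls. As a rough illustration only (not part of this package's code), the same round trip can be driven through the public convenience client; the sketch below assumes the standalone ``azure.storage.blob`` namespace (in this multi-API package the equivalent clients live under ``azure.multiapi.storagev2.blob.<api_version>``) and uses placeholder account details.

.. code-block:: python

    # Illustrative sketch only; account URL, key and container name are placeholders.
    from datetime import datetime, timedelta, timezone
    from azure.storage.blob import AccessPolicy, ContainerClient, ContainerSasPermissions

    container = ContainerClient(
        account_url="https://<account>.blob.core.windows.net",
        container_name="<container>",
        credential="<account-key>",
    )

    # set_access_policy: one stored access policy plus blob-level public access
    # (the x-ms-blob-public-access header in the generated operation above).
    read_only = AccessPolicy(
        permission=ContainerSasPermissions(read=True),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    )
    container.set_container_access_policy(
        signed_identifiers={"read-only": read_only},
        public_access="blob",
    )

    # get_access_policy: returns the public-access level and the stored identifiers
    # (dict shape assumed from current azure-storage-blob releases).
    acl = container.get_container_access_policy()
    print(acl)
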
- :type deleted_container_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{containerName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
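
The ``restore`` operation above maps the ``x-ms-deleted-container-name`` and ``x-ms-deleted-container-version`` headers onto a container undelete. A hedged sketch with the public ``BlobServiceClient`` follows; it assumes container soft delete is enabled on the account and that ``ContainerProperties`` exposes ``deleted``/``version`` as in current ``azure-storage-blob`` releases.

.. code-block:: python

    # Illustrative sketch only; placeholders for account URL and key.
    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient(
        account_url="https://<account>.blob.core.windows.net",
        credential="<account-key>",
    )

    # Find a soft-deleted container; its name and version correspond to the
    # x-ms-deleted-container-name / x-ms-deleted-container-version headers above.
    deleted = next(
        c for c in service.list_containers(include_deleted=True) if c.deleted
    )
    service.undelete_container(
        deleted_container_name=deleted.name,
        deleted_container_version=deleted.version,
    )
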
- :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
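
``acquire_lease`` and ``release_lease`` above implement the container lease lock described in the docstrings (15 to 60 seconds, or infinite with -1). A minimal sketch of that lifecycle through the public ``ContainerClient``/``BlobLeaseClient`` pair, with placeholder credentials:

.. code-block:: python

    # Illustrative sketch only; not the generated async layer shown above.
    from azure.storage.blob import ContainerClient

    container = ContainerClient(
        account_url="https://<account>.blob.core.windows.net",
        container_name="<container>",
        credential="<account-key>",
    )

    # acquire: a 15-second lease (pass -1 for an infinite lease, per the duration docs above).
    lease = container.acquire_lease(lease_duration=15)
    try:
        # While the lease is held, mutating calls must present the lease id.
        container.set_container_metadata({"locked-by": "example"}, lease=lease)
    finally:
        # release: frees the lock immediately instead of waiting for expiry.
        lease.release()
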
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = 
{'url': '/{containerName}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - 
if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
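
``renew_lease`` above resets the duration clock on an active lease identified by ``x-ms-lease-id``. A small hedged sketch of a renewal loop with the public client (the timings are arbitrary example values):

.. code-block:: python

    # Illustrative sketch only: keep a 30-second container lease alive by
    # renewing it before it expires, then release it.
    import time
    from azure.storage.blob import ContainerClient

    container = ContainerClient(
        "https://<account>.blob.core.windows.net", "<container>", credential="<account-key>"
    )
    lease = container.acquire_lease(lease_duration=30)
    for _ in range(3):
        time.sleep(20)
        lease.renew()  # resets the 30-second window; raises if the lease was lost
    lease.release()
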
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - proposed_lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def list_blob_flat_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.ListBlobsFlatSegmentResponse": - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. 
- :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsFlatSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore - - async def list_blob_hierarchy_segment( - self, - delimiter: str, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.ListBlobsHierarchySegmentResponse": - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
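
``list_blob_flat_segment`` above exposes the raw ``prefix``/``marker``/``maxresults``/``include`` query parameters. In the public client these surface as ``name_starts_with``, the pager's continuation token, ``results_per_page`` and ``include``; a hedged sketch:

.. code-block:: python

    # Illustrative sketch only: flat listing with a prefix, page size and
    # continuation token, mirroring the query parameters described above.
    from azure.storage.blob import ContainerClient

    container = ContainerClient(
        "https://<account>.blob.core.windows.net", "<container>", credential="<account-key>"
    )

    pages = container.list_blobs(
        name_starts_with="logs/", include=["metadata"], results_per_page=100
    ).by_page()
    first_page = next(pages)
    for blob in first_page:
        print(blob.name, blob.size)

    # The continuation token plays the role of the NextMarker/marker parameter.
    marker = pages.continuation_token
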
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_account_info( - self, - **kwargs - ) 
-> None: - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_directory_operations.py deleted file mode 100644 index 338ff69..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_directory_operations.py +++ /dev/null @@ -1,739 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Create a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. 
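
The ``x-ms-permissions`` and ``x-ms-umask`` headers documented above surface as ``permissions`` and ``umask`` keyword arguments on the public Data Lake client (keyword names assumed from ``azure-storage-filedatalake``; they only apply to Hierarchical Namespace accounts). A hedged sketch:

.. code-block:: python

    # Illustrative sketch only; requires an ADLS Gen2 (Hierarchical Namespace) account.
    from azure.storage.filedatalake import DataLakeServiceClient

    service = DataLakeServiceClient(
        account_url="https://<account>.dfs.core.windows.net", credential="<account-key>"
    )
    filesystem = service.get_file_system_client("my-filesystem")

    # permissions/umask correspond to the x-ms-permissions / x-ms-umask headers above;
    # both symbolic and octal forms are accepted by the service.
    directory = filesystem.create_directory(
        "raw/2024",
        metadata={"owner": "etl"},
        permissions="rwxr-x---",
        umask="0027",
    )
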
If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - resource = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - 
header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def rename( - self, - rename_source: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - source_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> 
None: - """Rename a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] 
= self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def delete( - self, - recursive_directory_delete: bool, - timeout: Optional[int] = None, - marker: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted. - If "false" and the directory is non-empty, an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
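For comparison, deleting a directory through the public client is typically a one-liner; the names below are placeholders, and the sketch assumes the upstream azure-storage-file-datalake client as the wrapper around this generated delete operation::

    from azure.storage.filedatalake import DataLakeServiceClient

    service = DataLakeServiceClient.from_connection_string("<connection-string>")  # placeholder
    directory = service.get_directory_client(file_system="myfs", directory="stagingdir")

    # Maps to the DELETE request above; the recursive query parameter decides
    # whether a non-empty directory may be removed.
    directory.delete_directory()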
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
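A short, hedged illustration of the setAccessControl action wrapped by this operation; the permission and ACL strings are placeholders, and the keyword names assume the upstream DataLakePathClient surface::

    from azure.storage.filedatalake import DataLakeServiceClient

    service = DataLakeServiceClient.from_connection_string("<connection-string>")  # placeholder
    directory = service.get_directory_client(file_system="myfs", directory="data")

    # Symbolic permissions map to x-ms-permissions; an ACL string maps to x-ms-acl.
    directory.set_access_control(permissions="rwxr-x---")
    directory.set_access_control(acl="user::rwx,group::r-x,other::---")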
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_access_control( - self, - timeout: Optional[int] = None, - upn: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Get the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
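And the matching read side, again as an assumed sketch against the public client; the dictionary keys shown are what the upstream SDK is expected to surface from the x-ms-owner, x-ms-group, x-ms-permissions and x-ms-acl response headers above::

    from azure.storage.filedatalake import DataLakeServiceClient

    service = DataLakeServiceClient.from_connection_string("<connection-string>")  # placeholder
    directory = service.get_directory_client(file_system="myfs", directory="data")

    # upn=True asks the service to return User Principal Names instead of object IDs.
    props = directory.get_access_control(upn=True)
    print(props["owner"], props["group"], props["permissions"], props["acl"])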
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_page_blob_operations.py deleted file mode 100644 index 100f730..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_page_blob_operations.py +++ /dev/null @@ -1,1408 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class PageBlobOperations: - """PageBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - content_length: int, - blob_content_length: int, - timeout: Optional[int] = None, - tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, - metadata: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - blob_sequence_number: Optional[int] = 0, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "PageBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is 
not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def upload_pages( - self, - content_length: int, - body: IO, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Upload Pages operation writes a range of pages to a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
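Upload Pages is normally reached through BlobClient.upload_page; the sketch below assumes the 12.x-style offset/length keywords and uses placeholder names and data::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="disks", blob_name="disk.vhd")

    # Page ranges must be 512-byte aligned; this writes the first 4 KiB.
    data = b"\x00" * 4096
    blob.upload_page(page=data, offset=0, length=len(data))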
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "update" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if 
_encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def clear_pages( - self, - content_length: int, - timeout: Optional[int] = None, - range: Optional[str] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
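Clear Pages has a one-line counterpart on the public client; the offsets below are placeholders and must respect the same 512-byte alignment rule::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="disks", blob_name="disk.vhd")

    # Sends x-ms-page-write: clear for the given aligned range.
    blob.clear_page(offset=0, length=4096)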
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "clear" - accept = "application/xml" - - # Construct URL - url = self.clear_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = 
self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def upload_pages_from_url( - self, - source_url: str, - source_range: str, - content_length: int, - range: str, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - 
encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Upload Pages operation writes a range of pages to a page blob where the contents are read - from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The length of this range - should match the ContentLength header and x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be written. The range should - be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _lease_id = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "page" - page_write = "update" - accept = "application/xml" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if 
source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_page_ranges( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> "_models.PageList": - """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot - of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) 
- raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_page_ranges_diff( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - prevsnapshot: Optional[str] = None, - prev_snapshot_url: Optional[str] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> "_models.PageList": - """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that - were changed between target blob and previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a - DateTime value that specifies that the response will contain only pages that were changed - between target blob and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is - the older of the two. Note that incremental snapshots are currently supported only for blobs - created on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in service versions - 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The - response will only contain pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def resize( - self, - blob_content_length: int, - timeout: Optional[int] = None, - encryption_algorithm: Optional[str] = "AES256", - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.resize.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = 
self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def update_sequence_number( - self, - sequence_number_action: Union[str, "_models.SequenceNumberActionType"], - timeout: Optional[int] = None, - blob_sequence_number: Optional[int] = 0, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the - request. This property applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. - :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.update_sequence_number.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def copy_incremental( - self, - copy_source: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Copy Incremental operation copies a snapshot of the source page blob to a destination page - blob. The snapshot is copied such that only the differential changes between the previously - copied snapshot are transferred to the destination. The copied snapshots are complete copies of - the original snapshot and can be read or copied from as usual. This API is supported since REST - version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "incrementalcopy" - accept = "application/xml" - - # Construct URL - url = self.copy_incremental.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_service_operations.py deleted file mode 100644 index 91a0646..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,691 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def set_properties( - self, - storage_service_properties: "_models.StorageServiceProperties", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """Sets properties for a storage account's Blob service endpoint, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.StorageServiceProperties": - """gets the properties of a storage account's Blob service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - async def get_statistics( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.StorageServiceStats": - """Retrieves statistics related to replication for the Blob service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('StorageServiceStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/'} # type: ignore - - async def list_containers_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.ListContainersSegmentResponse": - """The List Containers Segment operation returns a list of the containers under the specified - account. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. 
- :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's metadata be returned as - part of the response body. - :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_containers_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", 
request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_containers_segment.metadata = {'url': '/'} # type: ignore - - async def get_user_delegation_key( - self, - key_info: "_models.KeyInfo", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.UserDelegationKey": - """Retrieves a user delegation key for the Blob service. This is only a valid operation when using - bearer token authentication. - - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey, or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "userdelegationkey" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('UserDelegationKey', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} # type: ignore - - async def get_account_info( - self, - **kwargs - ) -> None: - """Returns the sku name and account kind. 
- - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/'} # type: ignore - - async def submit_batch( - self, - content_length: int, - multipart_content_type: str, - body: IO, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> IO: - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/'} # type: ignore - - async def filter_blobs( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - where: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - **kwargs - ) -> "_models.FilterBlobSegment": - """The Filter Blobs operation enables callers to list blobs across 
all containers whose tags match - a given search expression. Filter blobs searches across all containers within a storage - account but can be scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/__init__.py deleted file mode 100644 index 9c98989..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/__init__.py +++ /dev/null @@ -1,223 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import AppendPositionAccessConditions - from ._models_py3 import ArrowConfiguration - from ._models_py3 import ArrowField - from ._models_py3 import BlobFlatListSegment - from ._models_py3 import BlobHTTPHeaders - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobMetadata - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import BlobTag - from ._models_py3 import BlobTags - from ._models_py3 import Block - from ._models_py3 import BlockList - from ._models_py3 import BlockLookupList - from ._models_py3 import ClearRange - from ._models_py3 import ContainerCpkScopeInfo - from ._models_py3 import ContainerItem - from ._models_py3 import ContainerProperties - from ._models_py3 import CorsRule - from ._models_py3 import CpkInfo - from ._models_py3 import CpkScopeInfo - from ._models_py3 import DataLakeStorageError - from ._models_py3 import DataLakeStorageErrorDetails - from ._models_py3 import DelimitedTextConfiguration - from ._models_py3 import DirectoryHttpHeaders - from ._models_py3 import FilterBlobItem - from ._models_py3 import FilterBlobSegment - from ._models_py3 import GeoReplication - from ._models_py3 import JsonTextConfiguration - from ._models_py3 import KeyInfo - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsFlatSegmentResponse - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ListContainersSegmentResponse - from ._models_py3 import Logging - from ._models_py3 import Metrics - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import PageList - from ._models_py3 import PageRange - from ._models_py3 import QueryFormat - from ._models_py3 import QueryRequest - from ._models_py3 import QuerySerialization - from ._models_py3 import RetentionPolicy - from ._models_py3 import SequenceNumberAccessConditions - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StaticWebsite - from ._models_py3 import StorageError - from ._models_py3 import StorageServiceProperties - from ._models_py3 import StorageServiceStats - from ._models_py3 import UserDelegationKey -except (SyntaxError, ImportError): - from ._models import AccessPolicy # type: ignore - from ._models import AppendPositionAccessConditions # type: ignore - from ._models import ArrowConfiguration # type: ignore - from ._models import ArrowField # type: ignore - from ._models import BlobFlatListSegment # type: ignore - from ._models import BlobHTTPHeaders # type: ignore - from ._models import BlobHierarchyListSegment # type: ignore - from ._models import BlobItemInternal # type: ignore - from ._models import BlobMetadata # type: ignore - from ._models import BlobPrefix # type: ignore - from ._models import BlobPropertiesInternal # type: ignore - from ._models import BlobTag # type: ignore - from ._models import BlobTags # type: ignore - from ._models import Block # type: ignore - from ._models import BlockList # type: ignore - from ._models import BlockLookupList # type: ignore - from ._models import ClearRange # type: ignore - from ._models import ContainerCpkScopeInfo # type: ignore - from ._models import ContainerItem # type: ignore - from ._models import ContainerProperties # type: ignore - 
from ._models import CorsRule # type: ignore - from ._models import CpkInfo # type: ignore - from ._models import CpkScopeInfo # type: ignore - from ._models import DataLakeStorageError # type: ignore - from ._models import DataLakeStorageErrorDetails # type: ignore - from ._models import DelimitedTextConfiguration # type: ignore - from ._models import DirectoryHttpHeaders # type: ignore - from ._models import FilterBlobItem # type: ignore - from ._models import FilterBlobSegment # type: ignore - from ._models import GeoReplication # type: ignore - from ._models import JsonTextConfiguration # type: ignore - from ._models import KeyInfo # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListBlobsFlatSegmentResponse # type: ignore - from ._models import ListBlobsHierarchySegmentResponse # type: ignore - from ._models import ListContainersSegmentResponse # type: ignore - from ._models import Logging # type: ignore - from ._models import Metrics # type: ignore - from ._models import ModifiedAccessConditions # type: ignore - from ._models import PageList # type: ignore - from ._models import PageRange # type: ignore - from ._models import QueryFormat # type: ignore - from ._models import QueryRequest # type: ignore - from ._models import QuerySerialization # type: ignore - from ._models import RetentionPolicy # type: ignore - from ._models import SequenceNumberAccessConditions # type: ignore - from ._models import SignedIdentifier # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StaticWebsite # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageServiceProperties # type: ignore - from ._models import StorageServiceStats # type: ignore - from ._models import UserDelegationKey # type: ignore - -from ._azure_blob_storage_enums import ( - AccessTier, - AccessTierOptional, - AccessTierRequired, - AccountKind, - ArchiveStatus, - BlobExpiryOptions, - BlobType, - BlockListType, - CopyStatusType, - DeleteSnapshotsOptionType, - GeoReplicationStatusType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListBlobsIncludeItem, - ListContainersIncludeType, - PathRenameMode, - PremiumPageBlobAccessTier, - PublicAccessType, - QueryFormatType, - RehydratePriority, - SequenceNumberActionType, - SkuName, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'AppendPositionAccessConditions', - 'ArrowConfiguration', - 'ArrowField', - 'BlobFlatListSegment', - 'BlobHTTPHeaders', - 'BlobHierarchyListSegment', - 'BlobItemInternal', - 'BlobMetadata', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'BlobTag', - 'BlobTags', - 'Block', - 'BlockList', - 'BlockLookupList', - 'ClearRange', - 'ContainerCpkScopeInfo', - 'ContainerItem', - 'ContainerProperties', - 'CorsRule', - 'CpkInfo', - 'CpkScopeInfo', - 'DataLakeStorageError', - 'DataLakeStorageErrorDetails', - 'DelimitedTextConfiguration', - 'DirectoryHttpHeaders', - 'FilterBlobItem', - 'FilterBlobSegment', - 'GeoReplication', - 'JsonTextConfiguration', - 'KeyInfo', - 'LeaseAccessConditions', - 'ListBlobsFlatSegmentResponse', - 'ListBlobsHierarchySegmentResponse', - 'ListContainersSegmentResponse', - 'Logging', - 'Metrics', - 'ModifiedAccessConditions', - 'PageList', - 'PageRange', - 'QueryFormat', - 'QueryRequest', - 'QuerySerialization', - 'RetentionPolicy', - 'SequenceNumberAccessConditions', - 'SignedIdentifier', - 'SourceModifiedAccessConditions', - 'StaticWebsite', - 'StorageError', - 
'StorageServiceProperties', - 'StorageServiceStats', - 'UserDelegationKey', - 'AccessTier', - 'AccessTierOptional', - 'AccessTierRequired', - 'AccountKind', - 'ArchiveStatus', - 'BlobExpiryOptions', - 'BlobType', - 'BlockListType', - 'CopyStatusType', - 'DeleteSnapshotsOptionType', - 'GeoReplicationStatusType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'ListBlobsIncludeItem', - 'ListContainersIncludeType', - 'PathRenameMode', - 'PremiumPageBlobAccessTier', - 'PublicAccessType', - 'QueryFormatType', - 'RehydratePriority', - 'SequenceNumberActionType', - 'SkuName', - 'StorageErrorCode', -] diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_azure_blob_storage_enums.py deleted file mode 100644 index 2df7b1a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_azure_blob_storage_enums.py +++ /dev/null @@ -1,334 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. 
- """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccessTierOptional(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccessTierRequired(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccountKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - STORAGE = "Storage" - BLOB_STORAGE = "BlobStorage" - STORAGE_V2 = "StorageV2" - FILE_STORAGE = "FileStorage" - BLOCK_BLOB_STORAGE = "BlockBlobStorage" - -class ArchiveStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot" - REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool" - -class BlobExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NEVER_EXPIRE = "NeverExpire" - RELATIVE_TO_CREATION = "RelativeToCreation" - RELATIVE_TO_NOW = "RelativeToNow" - ABSOLUTE = "Absolute" - -class BlobType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - BLOCK_BLOB = "BlockBlob" - PAGE_BLOB = "PageBlob" - APPEND_BLOB = "AppendBlob" - -class BlockListType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COMMITTED = "committed" - UNCOMMITTED = "uncommitted" - ALL = "all" - -class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - PENDING = "pending" - SUCCESS = "success" - ABORTED = "aborted" - FAILED = "failed" - -class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INCLUDE = "include" - ONLY = "only" - -class GeoReplicationStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the secondary location - """ - - LIVE = "live" - BOOTSTRAP = "bootstrap" - UNAVAILABLE = "unavailable" - -class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INFINITE = "infinite" - FIXED = "fixed" - -class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - AVAILABLE = "available" - LEASED = "leased" - EXPIRED = "expired" - BREAKING = "breaking" - BROKEN = "broken" - -class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LOCKED = "locked" - UNLOCKED = "unlocked" - -class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COPY = "copy" - DELETED = "deleted" - METADATA = "metadata" - SNAPSHOTS = "snapshots" - UNCOMMITTEDBLOBS = "uncommittedblobs" - VERSIONS = "versions" - TAGS = "tags" - -class ListContainersIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - METADATA = "metadata" - DELETED = "deleted" - -class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LEGACY = "legacy" - POSIX = "posix" - -class PremiumPageBlobAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" 
- P60 = "P60" - P70 = "P70" - P80 = "P80" - -class PublicAccessType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - CONTAINER = "container" - BLOB = "blob" - -class QueryFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The quick query format type. - """ - - DELIMITED = "delimited" - JSON = "json" - ARROW = "arrow" - -class RehydratePriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """If an object is in rehydrate pending state then this header is returned with priority of - rehydrate. Valid values are High and Standard. - """ - - HIGH = "High" - STANDARD = "Standard" - -class SequenceNumberActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - MAX = "max" - UPDATE = "update" - INCREMENT = "increment" - -class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - STANDARD_LRS = "Standard_LRS" - STANDARD_GRS = "Standard_GRS" - STANDARD_RAGRS = "Standard_RAGRS" - STANDARD_ZRS = "Standard_ZRS" - PREMIUM_LRS = "Premium_LRS" - -class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Error codes returned by the service - """ - - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = "InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = "OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" - BLOB_ALREADY_EXISTS = "BlobAlreadyExists" - BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy" - BLOB_NOT_FOUND = "BlobNotFound" - BLOB_OVERWRITTEN = "BlobOverwritten" - BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" - BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" 
- BLOCK_LIST_TOO_LONG = "BlockListTooLong" - CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" - CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" - CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" - CONTAINER_BEING_DELETED = "ContainerBeingDeleted" - CONTAINER_DISABLED = "ContainerDisabled" - CONTAINER_NOT_FOUND = "ContainerNotFound" - CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" - COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" - COPY_ID_MISMATCH = "CopyIdMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" - INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" - INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" - INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" - INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" - INVALID_BLOB_TIER = "InvalidBlobTier" - INVALID_BLOB_TYPE = "InvalidBlobType" - INVALID_BLOCK_ID = "InvalidBlockId" - INVALID_BLOCK_LIST = "InvalidBlockList" - INVALID_OPERATION = "InvalidOperation" - INVALID_PAGE_RANGE = "InvalidPageRange" - INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" - INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" - INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" - LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" - LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" - LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" - LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" - LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" - LEASE_ID_MISSING = "LeaseIdMissing" - LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" - LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" - LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" - LEASE_LOST = "LeaseLost" - LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" - LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" - LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" - MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" - NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" - NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" - OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" - PENDING_COPY_OPERATION = "PendingCopyOperation" - PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" - PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" - PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" - SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" - SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" - SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" - SNAPHOT_OPERATION_RATE_EXCEEDED = "SnaphotOperationRateExceeded" - SNAPSHOTS_PRESENT = "SnapshotsPresent" - SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" - SYSTEM_IN_USE = "SystemInUse" - TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" - UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" - BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" - BLOB_ARCHIVED = "BlobArchived" - BLOB_NOT_ARCHIVED = "BlobNotArchived" - AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" - AUTHORIZATION_PROTOCOL_MISMATCH = 
"AuthorizationProtocolMismatch" - AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" - AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" - AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_models.py deleted file mode 100644 index a92bc6b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_models.py +++ /dev/null @@ -1,2024 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: the date-time the policy is active. - :type start: str - :param expiry: the date-time the policy expires. - :type expiry: str - :param permission: the permissions for the acl policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class AppendPositionAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param max_size: Optional conditional header. The max length in bytes permitted for the append - blob. If the Append Block operation would cause the blob to exceed that limit or if the blob - size is already greater than the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will succeed only if the append - position is equal to this number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': 'maxSize', 'type': 'long'}, - 'append_position': {'key': 'appendPosition', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = kwargs.get('max_size', None) - self.append_position = kwargs.get('append_position', None) - - -class ArrowConfiguration(msrest.serialization.Model): - """arrow configuration. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. 
- :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = kwargs['schema'] - - -class ArrowField(msrest.serialization.Model): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. - :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str'}, - 'name': {'key': 'Name', 'type': 'str'}, - 'precision': {'key': 'Precision', 'type': 'int'}, - 'scale': {'key': 'Scale', 'type': 'int'}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__( - self, - **kwargs - ): - super(ArrowField, self).__init__(**kwargs) - self.type = kwargs['type'] - self.name = kwargs.get('name', None) - self.precision = kwargs.get('precision', None) - self.scale = kwargs.get('scale', None) - - -class BlobFlatListSegment(msrest.serialization.Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = kwargs['blob_items'] - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs['blob_items'] - - -class BlobHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property - is stored with the blob and returned with a read request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If specified, this property - is stored with the blob and returned with a read request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not - validated, as the hashes for the individual blocks were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. 
Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, - 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, - 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, - 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, - 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, - 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = kwargs.get('blob_cache_control', None) - self.blob_content_type = kwargs.get('blob_content_type', None) - self.blob_content_md5 = kwargs.get('blob_content_md5', None) - self.blob_content_encoding = kwargs.get('blob_content_encoding', None) - self.blob_content_language = kwargs.get('blob_content_language', None) - self.blob_content_disposition = kwargs.get('blob_content_disposition', None) - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. - :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: Blob tags. - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: Dictionary of :code:``. 
- :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs['deleted'] - self.snapshot = kwargs['snapshot'] - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - self.blob_tags = kwargs.get('blob_tags', None) - self.object_replication_metadata = kwargs.get('object_replication_metadata', None) - - -class BlobMetadata(msrest.serialization.Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}'}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__( - self, - **kwargs - ): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = kwargs.get('additional_properties', None) - self.encrypted = kwargs.get('encrypted', None) - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs['name'] - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: "locked", "unlocked". 
- :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: "pending", "success", "aborted", "failed". - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", - "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: "rehydrate-pending-to-hot", "rehydrate- - pending-to-cool". - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: If an object is in rehydrate pending state then this header is - returned with priority of rehydrate. Valid values are High and Standard. Possible values - include: "High", "Standard". 
- :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'blob_type': {'key': 'BlobType', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.blob_type = kwargs.get('blob_type', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = 
kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_status = kwargs.get('copy_status', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.archive_status = kwargs.get('archive_status', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.rehydrate_priority = kwargs.get('rehydrate_priority', None) - self.last_accessed_on = kwargs.get('last_accessed_on', None) - - -class BlobTag(msrest.serialization.Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. - :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__( - self, - **kwargs - ): - super(BlobTag, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] - - -class BlobTags(msrest.serialization.Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__( - self, - **kwargs - ): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = kwargs['blob_tag_set'] - - -class Block(msrest.serialization.Model): - """Represents a single block in a block blob. It describes the block's ID and size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'size': {'key': 'Size', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(Block, self).__init__(**kwargs) - self.name = kwargs['name'] - self.size = kwargs['size'] - - -class BlockList(msrest.serialization.Model): - """BlockList. 
- - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - } - - def __init__( - self, - **kwargs - ): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = kwargs.get('committed_blocks', None) - self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) - - -class BlockLookupList(msrest.serialization.Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__( - self, - **kwargs - ): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = kwargs.get('committed', None) - self.uncommitted = kwargs.get('uncommitted', None) - self.latest = kwargs.get('latest', None) - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class ContainerCpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the - default encryption scope to set on the container and use for all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, - prevents any request from specifying a different encryption scope than the scope set on the - container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - - -class ContainerItem(msrest.serialization.Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a container. 
- :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__( - self, - **kwargs - ): - super(ContainerItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - - -class ContainerProperties(msrest.serialization.Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: "container", "blob". - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'public_access': {'key': 'PublicAccess', 'type': 'str'}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.public_access = kwargs.get('public_access', None) - self.has_immutability_policy = 
kwargs.get('has_immutability_policy', None) - self.has_legal_hold = kwargs.get('has_legal_hold', None) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user agent sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs['allowed_origins'] - self.allowed_methods = kwargs['allowed_methods'] - self.allowed_headers = kwargs['allowed_headers'] - self.exposed_headers = kwargs['exposed_headers'] - self.max_age_in_seconds = kwargs['max_age_in_seconds'] - - -class CpkInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided - if the x-ms-encryption-key header is provided.
- :type encryption_key_sha256: str - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = kwargs.get('encryption_key', None) - self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) - - -class CpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the - encryption scope to use to encrypt the data provided in the request. If not specified, - encryption is performed with the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = kwargs.get('encryption_scope', None) - - -class DataLakeStorageError(msrest.serialization.Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: ~azure.storage.blob.models.DataLakeStorageErrorDetails - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorDetails'}, - } - - def __init__( - self, - **kwargs - ): - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None) - - -class DataLakeStorageErrorDetails(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DataLakeStorageErrorDetails, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - - -class DelimitedTextConfiguration(msrest.serialization.Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator. - :type column_separator: str - :param field_quote: Required. field quote. - :type field_quote: str - :param record_separator: Required. record separator. - :type record_separator: str - :param escape_char: Required. escape char. - :type escape_char: str - :param headers_present: Required. has headers. 
- :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = kwargs['column_separator'] - self.field_quote = kwargs['field_quote'] - self.record_separator = kwargs['record_separator'] - self.escape_char = kwargs['escape_char'] - self.headers_present = kwargs['headers_present'] - - -class DirectoryHttpHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Cache control for given resource. - :type cache_control: str - :param content_type: Content type for given resource. - :type content_type: str - :param content_encoding: Content encoding for given resource. - :type content_encoding: str - :param content_language: Content language for given resource. - :type content_language: str - :param content_disposition: Content disposition for given resource. - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - - -class FilterBlobItem(msrest.serialization.Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tags: A set of tags. Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'container_name': {'key': 'ContainerName', 'type': 'str'}, - 'tags': {'key': 'Tags', 'type': 'BlobTags'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.container_name = kwargs['container_name'] - self.tags = kwargs.get('tags', None) - - -class FilterBlobSegment(msrest.serialization.Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. 
- - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'where': {'key': 'Where', 'type': 'str'}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.where = kwargs['where'] - self.blobs = kwargs['blobs'] - self.next_marker = kwargs.get('next_marker', None) - - -class GeoReplication(msrest.serialization.Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible values include: "live", - "bootstrap", "unavailable". - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes - preceding this value are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for reads. - :type last_sync_time: ~datetime.datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str'}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, - } - - def __init__( - self, - **kwargs - ): - super(GeoReplication, self).__init__(**kwargs) - self.status = kwargs['status'] - self.last_sync_time = kwargs['last_sync_time'] - - -class JsonTextConfiguration(msrest.serialization.Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. record separator. - :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = kwargs['record_separator'] - - -class KeyInfo(msrest.serialization.Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC time. - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. 
- :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(KeyInfo, self).__init__(**kwargs) - self.start = kwargs['start'] - self.expiry = kwargs['expiry'] - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsFlatSegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ListContainersSegmentResponse(msrest.serialization.Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.container_items = kwargs['container_items'] - self.next_marker = kwargs.get('next_marker', None) - - -class Logging(msrest.serialization.Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. Indicates whether all delete requests should be logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be logged. - :type write: bool - :param retention_policy: Required. 
the retention policy which determines how long the - associated data should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'delete': {'key': 'Delete', 'type': 'bool'}, - 'read': {'key': 'Read', 'type': 'bool'}, - 'write': {'key': 'Write', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Logging, self).__init__(**kwargs) - self.version = kwargs['version'] - self.delete = kwargs['delete'] - self.read = kwargs['read'] - self.write = kwargs['write'] - self.retention_policy = kwargs['retention_policy'] - - -class Metrics(msrest.serialization.Model): - """a summary of request statistics grouped by API in hour or minute aggregates for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: the retention policy which determines how long the associated data - should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs['enabled'] - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. 
- :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - 'if_tags': {'key': 'ifTags', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_tags = kwargs.get('if_tags', None) - - -class PageList(msrest.serialization.Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, - } - - def __init__( - self, - **kwargs - ): - super(PageList, self).__init__(**kwargs) - self.page_range = kwargs.get('page_range', None) - self.clear_range = kwargs.get('clear_range', None) - - -class PageRange(msrest.serialization.Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__( - self, - **kwargs - ): - super(PageRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class QueryFormat(msrest.serialization.Model): - """QueryFormat. - - :param type: The quick query format type. Possible values include: "delimited", "json", - "arrow". - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: delimited text configuration. - :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: json text configuration. - :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: arrow configuration. - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, - } - - def __init__( - self, - **kwargs - ): - super(QueryFormat, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) - self.json_text_configuration = kwargs.get('json_text_configuration', None) - self.arrow_configuration = kwargs.get('arrow_configuration', None) - - -class QueryRequest(msrest.serialization.Model): - """the quick query body. 
- - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL". - :vartype query_type: str - :param expression: Required. a query statement. - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__( - self, - **kwargs - ): - super(QueryRequest, self).__init__(**kwargs) - self.expression = kwargs['expression'] - self.input_serialization = kwargs.get('input_serialization', None) - self.output_serialization = kwargs.get('output_serialization', None) - - -class QuerySerialization(msrest.serialization.Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. - :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat'}, - } - - def __init__( - self, - **kwargs - ): - super(QuerySerialization, self).__init__(**kwargs) - self.format = kwargs['format'] - - -class RetentionPolicy(msrest.serialization.Model): - """the retention policy which determines how long the associated data should persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the storage - service. - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :type days: int - :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage - account. - :type allow_permanent_delete: bool - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.days = kwargs.get('days', None) - self.allow_permanent_delete = kwargs.get('allow_permanent_delete', None) - - -class SequenceNumberAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a - blob if it has a sequence number less than or equal to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it - has a sequence number less than the specified. 
- :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it - has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, - 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, - 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None) - self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None) - self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None) - - -class SignedIdentifier(msrest.serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id. - :type id: str - :param access_policy: An Access policy. - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__( - self, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs['id'] - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_tags = kwargs.get('source_if_tags', None) - - -class StaticWebsite(msrest.serialization.Model): - """The properties that enable an account to host a static website. 
- - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a static website. - :type enabled: bool - :param index_document: The default name of the index page under each directory. - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page. - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index page. - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'index_document': {'key': 'IndexDocument', 'type': 'str'}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.index_document = kwargs.get('index_document', None) - self.error_document404_path = kwargs.get('error_document404_path', None) - self.default_index_document_path = kwargs.get('default_index_document_path', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage Service Properties. - - :param logging: Azure Analytics Logging settings. - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to the Blob service if - an incoming request's version is not specified. Possible values include version 2008-10-27 and - all more recent versions. - :type default_service_version: str - :param delete_retention_policy: the retention policy which determines how long the associated - data should persist. - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: The properties that enable an account to host a static website. 
- :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging'}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = kwargs.get('logging', None) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.default_service_version = kwargs.get('default_service_version', None) - self.delete_retention_policy = kwargs.get('delete_retention_policy', None) - self.static_website = kwargs.get('static_website', None) - - -class StorageServiceStats(msrest.serialization.Model): - """Stats for the storage service. - - :param geo_replication: Geo-Replication information for the Secondary Storage Service. - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = kwargs.get('geo_replication', None) - - -class UserDelegationKey(msrest.serialization.Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. - :type signed_tid: str - :param signed_start: Required. The date-time the key is active. - :type signed_start: ~datetime.datetime - :param signed_expiry: Required. The date-time the key expires. - :type signed_expiry: ~datetime.datetime - :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the - key. - :type signed_service: str - :param signed_version: Required. The service version that created the key. - :type signed_version: str - :param value: Required. The key as a base64 string. 
- :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, - 'signed_service': {'key': 'SignedService', 'type': 'str'}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = kwargs['signed_oid'] - self.signed_tid = kwargs['signed_tid'] - self.signed_start = kwargs['signed_start'] - self.signed_expiry = kwargs['signed_expiry'] - self.signed_service = kwargs['signed_service'] - self.signed_version = kwargs['signed_version'] - self.value = kwargs['value'] diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_models_py3.py deleted file mode 100644 index b1339f0..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/models/_models_py3.py +++ /dev/null @@ -1,2295 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Dict, List, Optional, Union - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - -from ._azure_blob_storage_enums import * - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: the date-time the policy is active. - :type start: str - :param expiry: the date-time the policy expires. - :type expiry: str - :param permission: the permissions for the acl policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - *, - start: Optional[str] = None, - expiry: Optional[str] = None, - permission: Optional[str] = None, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class AppendPositionAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param max_size: Optional conditional header. The max length in bytes permitted for the append - blob. If the Append Block operation would cause the blob to exceed that limit or if the blob - size is already greater than the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. 
Append Block will succeed only if the append - position is equal to this number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': 'maxSize', 'type': 'long'}, - 'append_position': {'key': 'appendPosition', 'type': 'long'}, - } - - def __init__( - self, - *, - max_size: Optional[int] = None, - append_position: Optional[int] = None, - **kwargs - ): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = max_size - self.append_position = append_position - - -class ArrowConfiguration(msrest.serialization.Model): - """arrow configuration. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. - :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__( - self, - *, - schema: List["ArrowField"], - **kwargs - ): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = schema - - -class ArrowField(msrest.serialization.Model): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. - :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str'}, - 'name': {'key': 'Name', 'type': 'str'}, - 'precision': {'key': 'Precision', 'type': 'int'}, - 'scale': {'key': 'Scale', 'type': 'int'}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__( - self, - *, - type: str, - name: Optional[str] = None, - precision: Optional[int] = None, - scale: Optional[int] = None, - **kwargs - ): - super(ArrowField, self).__init__(**kwargs) - self.type = type - self.name = name - self.precision = precision - self.scale = scale - - -class BlobFlatListSegment(msrest.serialization.Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - **kwargs - ): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = blob_items - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. 
- :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - blob_prefixes: Optional[List["BlobPrefix"]] = None, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property - is stored with the blob and returned with a read request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If specified, this property - is stored with the blob and returned with a read request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not - validated, as the hashes for the individual blocks were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, - 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, - 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, - 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, - 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, - 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - blob_cache_control: Optional[str] = None, - blob_content_type: Optional[str] = None, - blob_content_md5: Optional[bytearray] = None, - blob_content_encoding: Optional[str] = None, - blob_content_language: Optional[str] = None, - blob_content_disposition: Optional[str] = None, - **kwargs - ): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = blob_cache_control - self.blob_content_type = blob_content_type - self.blob_content_md5 = blob_content_md5 - self.blob_content_encoding = blob_content_encoding - self.blob_content_language = blob_content_language - self.blob_content_disposition = blob_content_disposition - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. 
- :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: Blob tags. - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: Dictionary of :code:``. - :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - deleted: bool, - snapshot: str, - properties: "BlobPropertiesInternal", - version_id: Optional[str] = None, - is_current_version: Optional[bool] = None, - metadata: Optional["BlobMetadata"] = None, - blob_tags: Optional["BlobTags"] = None, - object_replication_metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.metadata = metadata - self.blob_tags = blob_tags - self.object_replication_metadata = object_replication_metadata - - -class BlobMetadata(msrest.serialization.Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}'}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, str]] = None, - encrypted: Optional[str] = None, - **kwargs - ): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.encrypted = encrypted - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. 
- :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: "pending", "success", "aborted", "failed". - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", - "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: "rehydrate-pending-to-hot", "rehydrate- - pending-to-cool". - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: If an object is in rehydrate pending state then this header is - returned with priority of rehydrate. Valid values are High and Standard. Possible values - include: "High", "Standard". 
- :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'blob_type': {'key': 'BlobType', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - creation_time: Optional[datetime.datetime] = None, - content_length: Optional[int] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_md5: Optional[bytearray] = None, - content_disposition: Optional[str] = None, - cache_control: Optional[str] = None, - blob_sequence_number: Optional[int] = None, - blob_type: Optional[Union[str, "BlobType"]] = None, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - copy_id: Optional[str] = None, - copy_status: Optional[Union[str, "CopyStatusType"]] = None, - copy_source: Optional[str] = None, - copy_progress: 
Optional[str] = None, - copy_completion_time: Optional[datetime.datetime] = None, - copy_status_description: Optional[str] = None, - server_encrypted: Optional[bool] = None, - incremental_copy: Optional[bool] = None, - destination_snapshot: Optional[str] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier: Optional[Union[str, "AccessTier"]] = None, - access_tier_inferred: Optional[bool] = None, - archive_status: Optional[Union[str, "ArchiveStatus"]] = None, - customer_provided_key_sha256: Optional[str] = None, - encryption_scope: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - tag_count: Optional[int] = None, - expires_on: Optional[datetime.datetime] = None, - is_sealed: Optional[bool] = None, - rehydrate_priority: Optional[Union[str, "RehydratePriority"]] = None, - last_accessed_on: Optional[datetime.datetime] = None, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.blob_type = blob_type - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.copy_id = copy_id - self.copy_status = copy_status - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_inferred = access_tier_inferred - self.archive_status = archive_status - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.rehydrate_priority = rehydrate_priority - self.last_accessed_on = last_accessed_on - - -class BlobTag(msrest.serialization.Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. - :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__( - self, - *, - key: str, - value: str, - **kwargs - ): - super(BlobTag, self).__init__(**kwargs) - self.key = key - self.value = value - - -class BlobTags(msrest.serialization.Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. 
- :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__( - self, - *, - blob_tag_set: List["BlobTag"], - **kwargs - ): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = blob_tag_set - - -class Block(msrest.serialization.Model): - """Represents a single block in a block blob. It describes the block's ID and size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'size': {'key': 'Size', 'type': 'int'}, - } - - def __init__( - self, - *, - name: str, - size: int, - **kwargs - ): - super(Block, self).__init__(**kwargs) - self.name = name - self.size = size - - -class BlockList(msrest.serialization.Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - } - - def __init__( - self, - *, - committed_blocks: Optional[List["Block"]] = None, - uncommitted_blocks: Optional[List["Block"]] = None, - **kwargs - ): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = committed_blocks - self.uncommitted_blocks = uncommitted_blocks - - -class BlockLookupList(msrest.serialization.Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__( - self, - *, - committed: Optional[List[str]] = None, - uncommitted: Optional[List[str]] = None, - latest: Optional[List[str]] = None, - **kwargs - ): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = committed - self.uncommitted = uncommitted - self.latest = latest - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class ContainerCpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the - default encryption scope to set on the container and use for all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, - prevents any request from specifying a different encryption scope than the scope set on the - container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, - } - - def __init__( - self, - *, - default_encryption_scope: Optional[str] = None, - prevent_encryption_scope_override: Optional[bool] = None, - **kwargs - ): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - - -class ContainerItem(msrest.serialization.Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a container. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__( - self, - *, - name: str, - properties: "ContainerProperties", - deleted: Optional[bool] = None, - version: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(ContainerItem, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class ContainerProperties(msrest.serialization.Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". 
- :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: "container", "blob". - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'public_access': {'key': 'PublicAccess', 'type': 'str'}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - public_access: Optional[Union[str, "PublicAccessType"]] = None, - has_immutability_policy: Optional[bool] = None, - has_legal_hold: Optional[bool] = None, - default_encryption_scope: Optional[str] = None, - prevent_encryption_scope_override: Optional[bool] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - **kwargs - ): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.public_access = public_access - self.has_immutability_policy = has_immutability_policy - self.has_legal_hold = has_legal_hold - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. 
Note that the origin must be an exact case-sensitive match with the origin that the - user agent sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - *, - allowed_origins: str, - allowed_methods: str, - allowed_headers: str, - exposed_headers: str, - max_age_in_seconds: int, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class CpkInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided - if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_key: Optional[str] = None, - encryption_key_sha256: Optional[str] = None, - **kwargs - ): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = encryption_key - self.encryption_key_sha256 = encryption_key_sha256 - - -class CpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the - encryption scope to use to encrypt the data provided in the request. If not specified, - encryption is performed with the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services.
- :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_scope: Optional[str] = None, - **kwargs - ): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = encryption_scope - - -class DataLakeStorageError(msrest.serialization.Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: ~azure.storage.blob.models.DataLakeStorageErrorDetails - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorDetails'}, - } - - def __init__( - self, - *, - data_lake_storage_error_details: Optional["DataLakeStorageErrorDetails"] = None, - **kwargs - ): - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = data_lake_storage_error_details - - -class DataLakeStorageErrorDetails(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - code: Optional[str] = None, - message: Optional[str] = None, - **kwargs - ): - super(DataLakeStorageErrorDetails, self).__init__(**kwargs) - self.code = code - self.message = message - - -class DelimitedTextConfiguration(msrest.serialization.Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator. - :type column_separator: str - :param field_quote: Required. field quote. - :type field_quote: str - :param record_separator: Required. record separator. - :type record_separator: str - :param escape_char: Required. escape char. - :type escape_char: str - :param headers_present: Required. has headers. - :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__( - self, - *, - column_separator: str, - field_quote: str, - record_separator: str, - escape_char: str, - headers_present: bool, - **kwargs - ): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = column_separator - self.field_quote = field_quote - self.record_separator = record_separator - self.escape_char = escape_char - self.headers_present = headers_present - - -class DirectoryHttpHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Cache control for given resource. - :type cache_control: str - :param content_type: Content type for given resource. 
- :type content_type: str - :param content_encoding: Content encoding for given resource. - :type content_encoding: str - :param content_language: Content language for given resource. - :type content_language: str - :param content_disposition: Content disposition for given resource. - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - cache_control: Optional[str] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_disposition: Optional[str] = None, - **kwargs - ): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - - -class FilterBlobItem(msrest.serialization.Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tags: A set of tags. Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'container_name': {'key': 'ContainerName', 'type': 'str'}, - 'tags': {'key': 'Tags', 'type': 'BlobTags'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - container_name: str, - tags: Optional["BlobTags"] = None, - **kwargs - ): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = name - self.container_name = container_name - self.tags = tags - - -class FilterBlobSegment(msrest.serialization.Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'where': {'key': 'Where', 'type': 'str'}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - where: str, - blobs: List["FilterBlobItem"], - next_marker: Optional[str] = None, - **kwargs - ): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.where = where - self.blobs = blobs - self.next_marker = next_marker - - -class GeoReplication(msrest.serialization.Model): - """Geo-Replication information for the Secondary Storage Service. 
- - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible values include: "live", - "bootstrap", "unavailable". - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes - preceding this value are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for reads. - :type last_sync_time: ~datetime.datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str'}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, - } - - def __init__( - self, - *, - status: Union[str, "GeoReplicationStatusType"], - last_sync_time: datetime.datetime, - **kwargs - ): - super(GeoReplication, self).__init__(**kwargs) - self.status = status - self.last_sync_time = last_sync_time - - -class JsonTextConfiguration(msrest.serialization.Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. record separator. - :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__( - self, - *, - record_separator: str, - **kwargs - ): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = record_separator - - -class KeyInfo(msrest.serialization.Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC time. - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - } - - def __init__( - self, - *, - start: str, - expiry: str, - **kwargs - ): - super(KeyInfo, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsFlatSegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobFlatListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobHierarchyListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - delimiter: Optional[str] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ListContainersSegmentResponse(msrest.serialization.Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. 
- :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_items: List["ContainerItem"], - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.container_items = container_items - self.next_marker = next_marker - - -class Logging(msrest.serialization.Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. Indicates whether all delete requests should be logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be logged. - :type write: bool - :param retention_policy: Required. the retention policy which determines how long the - associated data should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'delete': {'key': 'Delete', 'type': 'bool'}, - 'read': {'key': 'Read', 'type': 'bool'}, - 'write': {'key': 'Write', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - version: str, - delete: bool, - read: bool, - write: bool, - retention_policy: "RetentionPolicy", - **kwargs - ): - super(Logging, self).__init__(**kwargs) - self.version = version - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy - - -class Metrics(msrest.serialization.Model): - """a summary of request statistics grouped by API in hour or minute aggregates for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. 
- :type include_apis: bool - :param retention_policy: the retention policy which determines how long the associated data - should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - enabled: bool, - version: Optional[str] = None, - include_apis: Optional[bool] = None, - retention_policy: Optional["RetentionPolicy"] = None, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - 'if_tags': {'key': 'ifTags', 'type': 'str'}, - } - - def __init__( - self, - *, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - if_tags: Optional[str] = None, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - self.if_tags = if_tags - - -class PageList(msrest.serialization.Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, - } - - def __init__( - self, - *, - page_range: Optional[List["PageRange"]] = None, - clear_range: Optional[List["ClearRange"]] = None, - **kwargs - ): - super(PageList, self).__init__(**kwargs) - self.page_range = page_range - self.clear_range = clear_range - - -class PageRange(msrest.serialization.Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(PageRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class QueryFormat(msrest.serialization.Model): - """QueryFormat. - - :param type: The quick query format type. Possible values include: "delimited", "json", - "arrow". - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: delimited text configuration. - :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: json text configuration. - :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: arrow configuration. - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, - } - - def __init__( - self, - *, - type: Optional[Union[str, "QueryFormatType"]] = None, - delimited_text_configuration: Optional["DelimitedTextConfiguration"] = None, - json_text_configuration: Optional["JsonTextConfiguration"] = None, - arrow_configuration: Optional["ArrowConfiguration"] = None, - **kwargs - ): - super(QueryFormat, self).__init__(**kwargs) - self.type = type - self.delimited_text_configuration = delimited_text_configuration - self.json_text_configuration = json_text_configuration - self.arrow_configuration = arrow_configuration - - -class QueryRequest(msrest.serialization.Model): - """the quick query body. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL". - :vartype query_type: str - :param expression: Required. a query statement. 
- :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__( - self, - *, - expression: str, - input_serialization: Optional["QuerySerialization"] = None, - output_serialization: Optional["QuerySerialization"] = None, - **kwargs - ): - super(QueryRequest, self).__init__(**kwargs) - self.expression = expression - self.input_serialization = input_serialization - self.output_serialization = output_serialization - - -class QuerySerialization(msrest.serialization.Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. - :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat'}, - } - - def __init__( - self, - *, - format: "QueryFormat", - **kwargs - ): - super(QuerySerialization, self).__init__(**kwargs) - self.format = format - - -class RetentionPolicy(msrest.serialization.Model): - """the retention policy which determines how long the associated data should persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the storage - service. - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :type days: int - :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage - account. - :type allow_permanent_delete: bool - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, - } - - def __init__( - self, - *, - enabled: bool, - days: Optional[int] = None, - allow_permanent_delete: Optional[bool] = None, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - self.allow_permanent_delete = allow_permanent_delete - - -class SequenceNumberAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a - blob if it has a sequence number less than or equal to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it - has a sequence number less than the specified. 
- :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it - has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, - 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, - 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, - } - - def __init__( - self, - *, - if_sequence_number_less_than_or_equal_to: Optional[int] = None, - if_sequence_number_less_than: Optional[int] = None, - if_sequence_number_equal_to: Optional[int] = None, - **kwargs - ): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to - self.if_sequence_number_less_than = if_sequence_number_less_than - self.if_sequence_number_equal_to = if_sequence_number_equal_to - - -class SignedIdentifier(msrest.serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id. - :type id: str - :param access_policy: An Access policy. - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__( - self, - *, - id: str, - access_policy: Optional["AccessPolicy"] = None, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. 
- :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, - } - - def __init__( - self, - *, - source_if_modified_since: Optional[datetime.datetime] = None, - source_if_unmodified_since: Optional[datetime.datetime] = None, - source_if_match: Optional[str] = None, - source_if_none_match: Optional[str] = None, - source_if_tags: Optional[str] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_tags = source_if_tags - - -class StaticWebsite(msrest.serialization.Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a static website. - :type enabled: bool - :param index_document: The default name of the index page under each directory. - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page. - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index page. - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'index_document': {'key': 'IndexDocument', 'type': 'str'}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, - } - - def __init__( - self, - *, - enabled: bool, - index_document: Optional[str] = None, - error_document404_path: Optional[str] = None, - default_index_document_path: Optional[str] = None, - **kwargs - ): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = enabled - self.index_document = index_document - self.error_document404_path = error_document404_path - self.default_index_document_path = default_index_document_path - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - message: Optional[str] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage Service Properties. - - :param logging: Azure Analytics Logging settings. - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. 
- :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to the Blob service if - an incoming request's version is not specified. Possible values include version 2008-10-27 and - all more recent versions. - :type default_service_version: str - :param delete_retention_policy: the retention policy which determines how long the associated - data should persist. - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: The properties that enable an account to host a static website. - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging'}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, - } - - def __init__( - self, - *, - logging: Optional["Logging"] = None, - hour_metrics: Optional["Metrics"] = None, - minute_metrics: Optional["Metrics"] = None, - cors: Optional[List["CorsRule"]] = None, - default_service_version: Optional[str] = None, - delete_retention_policy: Optional["RetentionPolicy"] = None, - static_website: Optional["StaticWebsite"] = None, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = logging - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.default_service_version = default_service_version - self.delete_retention_policy = delete_retention_policy - self.static_website = static_website - - -class StorageServiceStats(msrest.serialization.Model): - """Stats for the storage service. - - :param geo_replication: Geo-Replication information for the Secondary Storage Service. - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, - } - - def __init__( - self, - *, - geo_replication: Optional["GeoReplication"] = None, - **kwargs - ): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = geo_replication - - -class UserDelegationKey(msrest.serialization.Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. - :type signed_tid: str - :param signed_start: Required. The date-time the key is active. - :type signed_start: ~datetime.datetime - :param signed_expiry: Required. The date-time the key expires. - :type signed_expiry: ~datetime.datetime - :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the - key. - :type signed_service: str - :param signed_version: Required. The service version that created the key. - :type signed_version: str - :param value: Required. The key as a base64 string. 
- :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, - 'signed_service': {'key': 'SignedService', 'type': 'str'}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - - def __init__( - self, - *, - signed_oid: str, - signed_tid: str, - signed_start: datetime.datetime, - signed_expiry: datetime.datetime, - signed_service: str, - signed_version: str, - value: str, - **kwargs - ): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = signed_oid - self.signed_tid = signed_tid - self.signed_start = signed_start - self.signed_expiry = signed_expiry - self.signed_service = signed_service - self.signed_version = signed_version - self.value = value diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/__init__.py deleted file mode 100644 index 62f85c9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_append_blob_operations.py deleted file mode 100644 index abbe40e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_append_blob_operations.py +++ /dev/null @@ -1,717 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class AppendBlobOperations(object): - """AppendBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - content_length, # type: int - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "AppendBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", 
_blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def append_block( - self, - content_length, # type: int - body, # type: IO - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Append Block operation commits a new block of data to the end of an existing append blob. - The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. 
- :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _max_size = None - _append_position = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "appendblock" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.append_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 
'long') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', 
response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def append_block_from_url( - self, - source_url, # type: str - content_length, # type: int - source_range=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Append Block operation commits a new block of data to the end of an existing append blob - where the contents are read from a source url. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. 
- :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _lease_id = None - _max_size = None - _append_position = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "appendblock" - accept = "application/xml" - - # Construct URL - url = self.append_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - 
header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def seal( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on - version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Parameter group. 
- :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _append_position = None - if append_position_access_conditions is not None: - _append_position = append_position_access_conditions.append_position - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - comp = "seal" - accept = "application/xml" - - # Construct URL - url = self.seal.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_blob_operations.py deleted file mode 100644 index 730a564..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_blob_operations.py +++ /dev/null @@ -1,3163 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BlobOperations(object): - """BlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def download( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - range_get_content_md5=None, # type: Optional[bool] - range_get_content_crc64=None, # type: Optional[bool] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Download operation reads or downloads a blob from the system, including its metadata and - properties. You can also call Download to read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param range_get_content_md5: When set to true and specified together with the Range, the - service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified together with the Range, the - service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 - MB in size. - :type range_get_content_crc64: bool - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match 
is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - 
response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - 
response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_properties( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. 
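The generated ``download`` and Get Properties operations above are normally reached through the public ``azure.storage.blob`` client surface rather than called directly. A minimal sketch of the equivalent calls, assuming a placeholder connection string and hypothetical container and blob names::

    from azure.storage.blob import BlobClient

    # Hypothetical connection string, container and blob names.
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="hello.txt"
    )

    # Ranged GET; offset/length become the x-ms-range header built above.
    data = blob.download_blob(offset=0, length=1024).readall()

    # HEAD request; the x-ms-* response headers are surfaced as BlobProperties.
    props = blob.get_blob_properties()
    print(props.etag, props.last_modified, props.blob_type)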
For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 
'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) - response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) - response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: 
ignore - - def delete( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] - request_id_parameter=None, # type: Optional[str] - blob_delete_type="Permanent", # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - permanently removed from the storage account. If the storage account's soft delete feature is - enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for the number of days - specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob- - Service-Properties.md). After the specified number of days has passed, the blob's data is - permanently removed from the storage account. Note that you continue to be charged for the - soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify - the "include=deleted" query parameter to discover which blobs and snapshots have been soft - deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other - operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code - of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the - following two options: include: Delete the base blob and all of its snapshots. only: Delete - only the blob's snapshots and not the blob itself. - :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to - permanently delete a blob if blob soft delete is enabled. - :type blob_delete_type: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
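On the public surface this Delete Blob operation, including the ``delete_snapshots`` option and the soft-delete behaviour described above, is exposed as ``BlobClient.delete_blob``. A sketch with a conditional header (the ``If-Match`` logic above), assuming the same hypothetical blob as in the earlier sketch::

    from azure.core import MatchConditions
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="hello.txt"
    )
    props = blob.get_blob_properties()

    # Delete the base blob and all of its snapshots, but only if it has not
    # changed since the properties were read (sent as If-Match).
    blob.delete_blob(
        delete_snapshots="include",
        etag=props.etag,
        match_condition=MatchConditions.IfNotModified,
    )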
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if blob_delete_type is not None: - query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: 
- map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_access_control( - self, - timeout=None, # type: Optional[int] - upn=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
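``set_access_control`` and ``get_access_control`` act on hierarchical-namespace (Data Lake) paths rather than plain blobs; the same operations are exposed by ``azure.storage.filedatalake``. A sketch, assuming a hypothetical account URL, file system, path and account key::

    from azure.storage.filedatalake import DataLakeFileClient

    file_client = DataLakeFileClient(
        account_url="https://<account>.dfs.core.windows.net",
        file_system_name="fs",
        file_path="dir/report.csv",
        credential="<account-key>",
    )

    # PATCH ?action=setAccessControl -- sets x-ms-owner / x-ms-permissions.
    file_client.set_access_control(owner="<owner-object-id>", permissions="rwxr-x---")

    # HEAD ?action=getAccessControl; upn=True asks for user principal names.
    acl = file_client.get_access_control(upn=True)
    print(acl["owner"], acl["permissions"], acl["acl"])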
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def rename( - self, - rename_source, # type: str - timeout=None, # type: Optional[int] - path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Rename a blob/file. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. 
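The rename operation above is likewise a Data Lake path operation; publicly it appears as ``rename_file`` / ``rename_directory``, where the new name uses the same ``"{filesystem}/{path}"`` shape as ``x-ms-rename-source``. A sketch, assuming the same hypothetical file client as above::

    from azure.storage.filedatalake import DataLakeFileClient

    file_client = DataLakeFileClient(
        account_url="https://<account>.dfs.core.windows.net",
        file_system_name="fs",
        file_path="dir/report.csv",
        credential="<account-key>",
    )

    # Moves dir/report.csv to archive/report-2021.csv; the destination is
    # overwritten by default, matching the behaviour described above.
    renamed = file_client.rename_file("fs/archive/report-2021.csv")
    print(renamed.url)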
If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - 
if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, 
header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def undelete( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_expiry( - self, - expiry_options, # type: Union[str, "_models.BlobExpiryOptions"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - expires_on=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
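``undelete`` reverses a soft delete and is exposed publicly as ``BlobClient.undelete_blob``; the Set Expiry operation that follows is surfaced for Data Lake files (for example through a ``set_file_expiry`` helper in ``azure.storage.filedatalake``, an assumption here rather than something shown in this diff). A minimal sketch of the undelete call, with the same hypothetical blob as before::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="hello.txt"
    )

    # PUT ?comp=undelete -- restores the blob and any soft-deleted snapshots,
    # provided the account's delete retention period has not expired.
    blob.undelete_blob()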
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/xml" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_http_headers( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_md5 = None - _blob_content_encoding = None - _blob_content_language = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _blob_content_disposition = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_disposition = blob_http_headers.blob_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language 
is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or - more name-value pairs. 
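For reference, the set_http_headers operation above reduces to a single PUT against the blob with comp=properties, copying the optional BlobHTTPHeaders fields onto x-ms-blob-* request headers. The sketch below shows that wire-level shape with plain requests; the blob URL, SAS token, header values and API version ("2021-08-06", one of the versions kept in this release) are placeholders, so treat it as an illustration of the header mapping rather than code shipped in this package.

# Illustrative only: wire-level equivalent of Blob.set_http_headers
# (PUT <blob-url>?comp=properties). All concrete values are placeholders.
import requests

def set_blob_http_headers_sketch(blob_url, sas_token, api_version="2021-08-06"):
    headers = {
        "x-ms-version": api_version,                    # taken from the client config in the generated code
        "x-ms-blob-content-type": "application/json",   # BlobHTTPHeaders.blob_content_type
        "x-ms-blob-cache-control": "no-cache",          # BlobHTTPHeaders.blob_cache_control
        "x-ms-blob-content-disposition": "inline",      # BlobHTTPHeaders.blob_content_disposition
    }
    # comp=properties selects the Set Blob Properties operation on the blob resource.
    resp = requests.put(blob_url + "?" + sas_token,
                        params={"comp": "properties"}, headers=headers)
    resp.raise_for_status()            # the generated code expects HTTP 200 here
    return resp.headers.get("ETag")    # a new ETag is returned on success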
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) 
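The Set Blob Metadata request being constructed here carries user metadata and, optionally, customer-provided-key (CpkInfo) headers on a PUT with comp=metadata. The following sketch shows that shape at the REST level; the metadata dict, SAS token and key are placeholders, and on the wire each metadata pair travels as its own x-ms-meta-<name> header.

# Illustrative only: wire-level shape of Blob.set_metadata (PUT <blob-url>?comp=metadata).
import base64
import hashlib
import requests

def set_blob_metadata_sketch(blob_url, sas_token, metadata, encryption_key=None):
    headers = {"x-ms-version": "2021-08-06"}
    # User metadata is sent as one x-ms-meta-<name> header per name-value pair.
    for name, value in metadata.items():
        headers["x-ms-meta-" + name] = value
    if encryption_key is not None:
        # Customer-provided key (CpkInfo): base64 key, its SHA-256, and the algorithm header.
        headers["x-ms-encryption-key"] = base64.b64encode(encryption_key).decode()
        headers["x-ms-encryption-key-sha256"] = base64.b64encode(
            hashlib.sha256(encryption_key).digest()).decode()
        headers["x-ms-encryption-algorithm"] = "AES256"
    resp = requests.put(blob_url + "?" + sas_token,
                        params={"comp": "metadata"}, headers=headers)
    resp.raise_for_status()  # the generated code expects HTTP 200
    return resp.headers.get("x-ms-request-server-encrypted")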
- - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = 
self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - proposed_lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
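change_lease above swaps the active lease ID in a single call: the request must carry both the current ID (x-ms-lease-id) and the proposed replacement (x-ms-proposed-lease-id), and the response echoes the new ID. A hedged wire-level sketch with placeholder values:

# Illustrative only: changing the ID of an active lease in one request.
import requests

def change_blob_lease_sketch(blob_url, sas_token, current_lease_id, new_lease_id):
    headers = {
        "x-ms-version": "2021-08-06",
        "x-ms-lease-action": "change",
        "x-ms-lease-id": current_lease_id,       # ID the blob is currently leased under
        "x-ms-proposed-lease-id": new_lease_id,  # GUID that becomes the new lease ID
    }
    resp = requests.put(blob_url + "?" + sas_token,
                        params={"comp": "lease"}, headers=headers)
    resp.raise_for_status()                      # HTTP 200; x-ms-lease-id now echoes the new ID
    return resp.headers["x-ms-lease-id"]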
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def create_snapshot( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
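The break_period rules in the break_lease docstring above are easy to misread, so here is a small helper that simply restates them: with no x-ms-lease-break-period header an infinite lease breaks immediately and a fixed-duration lease runs out its remaining time; with the header, the shorter of the proposed period and the remaining time wins (an infinite lease just uses the proposed period). This is a paraphrase of the documented behaviour, not code from the package.

# Illustrative only: effective break time implied by the break_lease documentation.
def effective_break_time(remaining_seconds, break_period=None, infinite_lease=False):
    if break_period is None:
        # No header: infinite leases break immediately, fixed-duration leases
        # break after their remaining period elapses.
        return 0 if infinite_lease else remaining_seconds
    if infinite_lease:
        return break_period
    # The break period is only used if it is shorter than the time remaining.
    return min(break_period, remaining_seconds)

assert effective_break_time(45, break_period=10) == 10   # proposed period is shorter
assert effective_break_time(5, break_period=30) == 5     # remaining time is shorter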
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def start_copy_from_url( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - seal_blob=None, # type: Optional[bool] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Start Copy From URL operation copies a blob or an internet resource to a new blob. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. 
- If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. Service version - 2019-12-12 and newer. - :type seal_blob: bool - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - accept = "application/xml" - - # Construct URL - url = self.start_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", 
self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def copy_from_url( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - blob_tags_string=None, # type: Optional[str] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not - return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
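start_copy_from_url above schedules an asynchronous server-side copy: the service answers 202 with x-ms-copy-id and x-ms-copy-status, and the caller polls the destination blob's properties until the status leaves "pending". copy_from_url, defined next, instead sends x-ms-requires-sync: true and only responds once the copy has finished. The sketch below shows the asynchronous start-and-poll pattern at the REST level; the source URL, destination URL and SAS token are placeholders.

# Illustrative only: starting an asynchronous copy and polling its status.
import time
import requests

def copy_blob_async_sketch(dest_blob_url, sas_token, source_url, poll_seconds=2):
    headers = {"x-ms-version": "2021-08-06", "x-ms-copy-source": source_url}
    resp = requests.put(dest_blob_url + "?" + sas_token, headers=headers)
    resp.raise_for_status()                    # HTTP 202: the copy has been scheduled
    copy_id = resp.headers["x-ms-copy-id"]
    status = resp.headers["x-ms-copy-status"]  # usually "pending" at this point
    while status == "pending":
        time.sleep(poll_seconds)
        # Get Blob Properties (HEAD) reports x-ms-copy-status for copy destinations.
        props = requests.head(dest_blob_url + "?" + sas_token,
                              headers={"x-ms-version": "2021-08-06"})
        status = props.headers.get("x-ms-copy-status", "success")
    return copy_id, status                     # "success", "aborted" or "failed"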
- :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - x_ms_requires_sync = "true" - accept = "application/xml" - - # Construct URL - url = self.copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 
'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - - if cls: - return cls(pipeline_response, None, response_headers) - 
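
The Start Copy From URL and Copy From URL operations removed above (and the Abort Copy From URL operation that follows) are normally reached through the higher-level blob client rather than this generated layer. A minimal, illustrative sketch, assuming the public azure.storage.blob package (not part of this diff); the connection string, container/blob names, and source URL below are placeholders:

    from azure.storage.blob import BlobClient

    # Hypothetical destination blob; any real connection string, container and blob name would do.
    dest = BlobClient.from_connection_string("<connection-string>", "mycontainer", "dest-blob")

    # Asynchronous server-side copy (Start Copy From URL); the result carries copy_id and copy_status.
    props = dest.start_copy_from_url("https://account.blob.core.windows.net/src/source-blob?<sas>")

    # The synchronous variant (Copy From URL) is requested with requires_sync=True:
    # dest.start_copy_from_url(source_url, requires_sync=True)

    # A still-pending asynchronous copy can be cancelled (Abort Copy From URL).
    dest.abort_copy(props["copy_id"])
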
- copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def abort_copy_from_url( - self, - copy_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a - destination blob with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_tier( - self, - tier, # type: Union[str, "_models.AccessTierRequired"] - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a - premium storage account and on a block blob in a blob storage account (locally redundant - storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not - update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tier" - accept = "application/xml" - - # Construct URL - url = self.set_tier.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if response.status_code == 202: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_account_info( - self, - **kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def query( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - query_request=None, # type: Optional["_models.QueryRequest"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Query operation enables users to select/project on blob data by providing simple query - expressions. 
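
The Set Tier and Get Account Info operations removed above, together with the Query operation whose docstring begins here, likewise have thin wrappers on the public BlobClient. A rough sketch under the same assumptions as above (placeholder connection string and names):

    from azure.storage.blob import BlobClient, StandardBlobTier

    blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "data.csv")

    # Set Tier: move a block blob between Hot/Cool/Archive.
    blob.set_standard_blob_tier(StandardBlobTier.COOL)

    # Get Account Info: returns the SKU name and account kind from the response headers.
    info = blob.get_account_information()
    print(info["sku_name"], info["account_kind"])

    # Query ("quick query"): server-side filtering of CSV/JSON blob content.
    reader = blob.query_blob("SELECT * FROM BlobStorage WHERE _1 > 100")
    filtered = reader.readall()
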
- - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param query_request: the query request. - :type query_request: ~azure.storage.blob.models.QueryRequest - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "query" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.query.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = 
self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', 
response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_tags( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.BlobTags" - """The Get Tags operation enables users to get the tags associated with a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlobTags, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - accept = "application/xml" - - # Construct URL - url = self.get_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlobTags', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_tags( - self, - timeout=None, # type: Optional[int] - version_id=None, # type: Optional[str] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - tags=None, # type: Optional["_models.BlobTags"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param tags: Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_block_blob_operations.py deleted file mode 100644 index 7bb13ab..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_block_blob_operations.py +++ /dev/null @@ -1,1113 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BlockBlobOperations(object): - """BlockBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def upload( - self, - content_length, # type: int - body, # type: IO - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Block Blob operation updates the content of an existing block blob. 
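
Before the docstring continues, note that the Get Tags and Set Tags operations removed just above surface on the public client as get_blob_tags and set_blob_tags. A brief sketch, again with placeholder names and outside the scope of this diff:

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "tagged-blob")

    # Set Tags replaces the blob's full tag set (the service allows up to 10 key/value pairs).
    blob.set_blob_tags({"project": "alpha", "stage": "raw"})

    # Get Tags returns the current tags as a plain dict.
    tags = blob.get_blob_tags()
    assert tags.get("project") == "alpha"
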
Updating an - existing block blob overwrites any existing metadata on the blob. Partial updates are not - supported with Put Blob; the content of the existing blob is overwritten with the content of - the new blob. To perform a partial update of the content of a block blob, use the Put Block - List operation. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "BlockBlob" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - 
header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def put_blob_from_url( - self, - content_length, # type: int - copy_source, # type: str - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - blob_tags_string=None, # type: Optional[str] - copy_source_blob_properties=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are - read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial - updates are not supported with Put Blob from URL; the content of an existing blob is - overwritten with the content of the new blob. To perform partial updates to a block blob’s - contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. - - :param content_length: The length of the request. - :type content_length: long - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. 
- If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param copy_source_blob_properties: Optional, default is true. Indicates if properties from - the source blob should be copied. - :type copy_source_blob_properties: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - blob_type = "BlockBlob" - accept = "application/xml" - - # Construct URL - url = self.put_blob_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", 
content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - 
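# Illustrative sketch: the x-ms-source-if-* headers assembled above let the service reject
# the operation when the source blob no longer matches the given conditions. In 12.x client
# releases that support this 2020-04-08 operation it surfaces as upload_blob_from_url
# (verify the exact method name against the vendored client you use). Placeholder endpoint,
# credential, and source URL:

from azure.storage.blob import BlobClient

dest = BlobClient(
    account_url="https://myaccount.blob.core.windows.net",   # placeholder account
    container_name="mycontainer",
    blob_name="copy-of-source.txt",
    credential="<account-key>",                               # placeholder credential
)
# The source must be publicly readable or carry its own SAS token.
dest.upload_blob_from_url(
    "https://otheraccount.blob.core.windows.net/src/source.txt?<sas-token>"
)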
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if copy_source_blob_properties is not None: - header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def stage_block( - self, - block_id, # type: str - content_length, # type: int - body, # type: IO - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Stage Block operation creates a new block to be committed as part of a blob. - - :param block_id: A valid Base64 string value that identifies the block. 
Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "block" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.stage_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if 
transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def stage_block_from_url( - self, - block_id, # type: str - content_length, # type: int - source_url, # type: str - source_range=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - 
cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Stage Block operation creates a new block to be committed as part of a blob where the - contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Parameter group. 
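# Illustrative sketch: Stage Block From URL asks the service to read the block contents
# directly from source_url instead of having the client upload them. Assuming the upstream
# azure.storage.blob 12.x client and placeholder values, staging one 4 MiB block from
# another blob looks roughly like this:

from azure.storage.blob import BlobClient

dest = BlobClient(
    account_url="https://myaccount.blob.core.windows.net",   # placeholder account
    container_name="mycontainer",
    blob_name="assembled.bin",
    credential="<account-key>",                               # placeholder credential
)
dest.stage_block_from_url(
    block_id="000001",                                        # hypothetical block id
    source_url="https://otheraccount.blob.core.windows.net/src/big.bin?<sas-token>",
    source_offset=0,
    source_length=4 * 1024 * 1024,
)
# The staged block still has to be committed via commit_block_list / Put Block List.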
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _lease_id = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "block" - accept = "application/xml" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", _lease_id, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def commit_block_list( - self, - blocks, # type: "_models.BlockLookupList" - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # 
type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Commit Block List operation writes a blob by specifying the list of block IDs that make up - the blob. In order to be written as part of a blob, a block must have been successfully written - to the server in a prior Put Block operation. You can call Put Block List to update a blob by - uploading only those blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from the committed block list - or from the uncommitted block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.commit_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if transactional_content_md5 is 
not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
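# Illustrative sketch: the BlockLookupList serialized just above travels as a small XML
# document, following the documented Put Block List request body. A hand-rolled equivalent,
# kept only to show the shape of that payload; block ids are base64-encoded, matching how
# they were staged, and the service also accepts <Committed> and <Uncommitted> entries for
# blocks reused from either list:

import base64

def put_block_list_body(latest_block_ids):
    entries = "".join(
        "<Latest>{}</Latest>".format(
            base64.b64encode(block_id.encode("utf-8")).decode("ascii")
        )
        for block_id in latest_block_ids
    )
    return '<?xml version="1.0" encoding="utf-8"?><BlockList>{}</BlockList>'.format(entries)

print(put_block_list_body(["000000", "000001"]))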
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_block_list( - self, - snapshot=None, # type: Optional[str] - list_type="committed", # type: Union[str, "_models.BlockListType"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.BlockList" - """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a - block blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param list_type: Specifies whether to return the list of committed blocks, the list of - uncommitted blocks, or both lists together. - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
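# Illustrative sketch: Get Block List can return the committed list, the uncommitted list,
# or both. In the upstream azure.storage.blob 12.x client the operation surfaces as
# get_block_list and returns the two lists as a (committed, uncommitted) pair. Placeholder
# endpoint and credential:

from azure.storage.blob import BlobClient

blob = BlobClient(
    account_url="https://myaccount.blob.core.windows.net",   # placeholder account
    container_name="mycontainer",
    blob_name="large.bin",
    credential="<account-key>",                               # placeholder credential
)
committed, uncommitted = blob.get_block_list("all")           # maps to blocklisttype=all
print(len(committed), "committed blocks,", len(uncommitted), "uncommitted blocks")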
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlockList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - accept = "application/xml" - - # Construct URL - url = self.get_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = 
self._deserialize('BlockList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_container_operations.py deleted file mode 100644 index 41a1c8a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_container_operations.py +++ /dev/null @@ -1,1482 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ContainerOperations(object): - """ContainerOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] - request_id_parameter=None, # type: Optional[str] - container_cpk_scope_info=None, # type: Optional["_models.ContainerCpkScopeInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """creates a new container under the specified account. If the container with the same name - already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_cpk_scope_info: Parameter group. - :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _default_encryption_scope = None - _prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - _default_encryption_scope = container_cpk_scope_info.default_encryption_scope - _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') - if _prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, 
model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """returns all user-defined metadata and system properties for the specified container. The data - returned does not include the container's list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) - response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) - response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) - response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}'} # type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """operation marks the specified container for deletion. The container and any blobs contained - within it are later deleted during garbage collection. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: 
Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """operation sets one or more user-defined name-value pairs for the specified container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - restype = "container" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}'} # type: ignore - - def get_access_policy( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> List["_models.SignedIdentifier"] - """gets the permissions for the specified container. The permissions indicate whether container - data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - def set_access_policy( - self, - timeout=None, # type: Optional[int] - access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] - request_id_parameter=None, # type: 
Optional[str] - container_acl=None, # type: Optional[List["_models.SignedIdentifier"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """sets the permissions for the specified container. The permissions indicate whether blobs in a - container may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_acl: the acls for the container. - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 
'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - def restore( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - deleted_container_name=None, # type: Optional[str] - deleted_container_version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of - the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the - version of the deleted container to restore. 
- :type deleted_container_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{containerName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, 
header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = 
{'url': '/{containerName}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - proposed_lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - 
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def list_blob_flat_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsFlatSegmentResponse" - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. 
- :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsFlatSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', 
response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore - - def list_blob_hierarchy_segment( - self, - delimiter, # type: str - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsHierarchySegmentResponse" - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore - - def get_account_info( - self, - **kwargs # type: Any - ): 
- # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_directory_operations.py deleted file mode 100644 index f025757..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,748 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Create a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. 
If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - resource = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", 
_cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def rename( - self, - rename_source, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - lease_access_conditions=None, # type: 
Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Rename a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - 
header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def delete( - self, - recursive_directory_delete, # type: bool - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted. - If "false" and the directory is non-empty, an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_access_control( - self, - timeout=None, # type: Optional[int] - upn=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_page_blob_operations.py deleted file mode 100644 index e7f8a02..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_page_blob_operations.py +++ /dev/null @@ -1,1421 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class PageBlobOperations(object): - """PageBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - content_length, # type: int - blob_content_length, # type: int - timeout=None, # type: Optional[int] - tier=None, # type: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] - metadata=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - blob_sequence_number=0, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. 
- :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "PageBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - 
header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', 
response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def upload_pages( - self, - content_length, # type: int - body, # type: IO - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Pages operation writes a range of pages to a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. 
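# --- Hedged usage sketch (not part of the deleted file) ---------------------
# The generated Create operation above is normally reached through the public
# BlobClient wrapper rather than called directly. The connection string,
# container and blob names below are illustrative assumptions; the vendored
# azure.multiapi.storagev2.blob.* namespaces are expected to expose the same
# client surface as upstream azure.storage.blob.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>",            # placeholder, not a real value
    container_name="mycontainer",
    blob_name="disk.vhd",
)
# Page blobs must be created with a size that is a multiple of 512 bytes.
blob.create_page_blob(size=1024 * 1024)
# ----------------------------------------------------------------------------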
- :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "update" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 
'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def clear_pages( - self, - content_length, # type: int - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
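# --- Hedged usage sketch (not part of the deleted file) ---------------------
# The Upload Pages operation above maps to BlobClient.upload_page in the public
# client. Offsets and lengths must be 512-byte aligned; all names and values
# below are illustrative assumptions.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="disk.vhd"
)
data = b"\x00" * 512                      # exactly one 512-byte page
blob.upload_page(data, offset=0, length=512)
# ----------------------------------------------------------------------------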
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "clear" - accept = "application/xml" - - # Construct URL - url = self.clear_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = 
self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def upload_pages_from_url( - self, - source_url, # type: str - source_range, # type: str - content_length, # type: int - range, # type: str - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # 
type: Optional[int] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Pages operation writes a range of pages to a page blob where the contents are read - from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The length of this range - should match the ContentLength header and x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be written. The range should - be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
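# --- Hedged usage sketch (not part of the deleted file) ---------------------
# The Clear Pages operation corresponds to BlobClient.clear_page, which zeroes
# a 512-byte-aligned range without sending a request body. Names below are
# assumptions.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="disk.vhd"
)
blob.clear_page(offset=0, length=512)     # clears the first page
# ----------------------------------------------------------------------------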
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _lease_id = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "page" - page_write = "update" - accept = "application/xml" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if 
source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_page_ranges( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.PageList" - """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot - of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
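# --- Hedged usage sketch (not part of the deleted file) ---------------------
# Upload Pages From URL is surfaced as BlobClient.upload_pages_from_url; the
# source must be readable by the service (public or SAS-authorized). The URL
# and ranges below are illustrative assumptions.
from azure.storage.blob import BlobClient

dest = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="copy.vhd"
)
dest.upload_pages_from_url(
    source_url="https://source.blob.core.windows.net/src/disk.vhd?<sas>",
    offset=0,                             # destination offset, 512-byte aligned
    length=512,
    source_offset=0,
)
# ----------------------------------------------------------------------------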
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - 
raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_page_ranges_diff( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - prevsnapshot=None, # type: Optional[str] - prev_snapshot_url=None, # type: Optional[str] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.PageList" - """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that - were changed between target blob and previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a - DateTime value that specifies that the response will contain only pages that were changed - between target blob and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is - the older of the two. Note that incremental snapshots are currently supported only for blobs - created on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in service versions - 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The - response will only contain pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
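# --- Hedged usage sketch (not part of the deleted file) ---------------------
# Get Page Ranges is exposed as BlobClient.get_page_ranges (newer SDKs also
# offer list_page_ranges). It returns a (ranges, cleared) pair of dict lists;
# the client setup below is an assumption.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="disk.vhd"
)
ranges, cleared = blob.get_page_ranges()
for r in ranges:
    print(r["start"], r["end"])           # byte offsets of each valid range
# ----------------------------------------------------------------------------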
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def resize( - self, - blob_content_length, # type: int - timeout=None, # type: Optional[int] - encryption_algorithm="AES256", # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. - :type encryption_algorithm: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
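# --- Hedged usage sketch (not part of the deleted file) ---------------------
# The diff variant of Get Page Ranges is reached by passing a previous snapshot
# to BlobClient.get_page_ranges; only pages changed since that snapshot are
# returned. The snapshot handling below is an assumption.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="mycontainer", blob_name="disk.vhd"
)
snap = blob.create_snapshot()
# ... write or clear more pages here ...
changed, cleared = blob.get_page_ranges(previous_snapshot_diff=snap)
# ----------------------------------------------------------------------------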
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.resize.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = 
self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def update_sequence_number( - self, - sequence_number_action, # type: Union[str, "_models.SequenceNumberActionType"] - timeout=None, # type: Optional[int] - blob_sequence_number=0, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the - request. This property applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. - :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.update_sequence_number.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def copy_incremental( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Copy Incremental operation copies a snapshot of the source page blob to a destination page - blob. The snapshot is copied such that only the differential changes between the previously - copied snapshot are transferred to the destination. The copied snapshots are complete copies of - the original snapshot and can be read or copied from as usual. This API is supported since REST - version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "incrementalcopy" - accept = "application/xml" - - # Construct URL - url = self.copy_incremental.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_service_operations.py deleted file mode 100644 index 72f7a73..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_generated/operations/_service_operations.py +++ /dev/null @@ -1,703 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def set_properties( - self, - storage_service_properties, # type: "_models.StorageServiceProperties" - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for a storage account's Blob service endpoint, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceProperties" - """gets the properties of a storage account's Blob service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - def get_statistics( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceStats" - """Retrieves statistics related to replication for the Blob service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('StorageServiceStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/'} # type: ignore - - def list_containers_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> "_models.ListContainersSegmentResponse" - """The List Containers Segment operation returns a list of the containers under the specified - account. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's metadata be returned as - part of the response body. - :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_containers_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_containers_segment.metadata = {'url': '/'} # type: ignore - - def get_user_delegation_key( - self, - key_info, # type: "_models.KeyInfo" - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.UserDelegationKey" - """Retrieves a user delegation key for the Blob service. This is only a valid operation when using - bearer token authentication. 
- - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey, or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "userdelegationkey" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('UserDelegationKey', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} # type: 
ignore - - def get_account_info( - self, - **kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/'} # type: ignore - - def submit_batch( - self, - content_length, # type: int - multipart_content_type, # type: str - body, # type: IO - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/'} # type: ignore - - def filter_blobs( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - where=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - **kwargs # type: 
Any - ): - # type: (...) -> "_models.FilterBlobSegment" - """The Filter Blobs operation enables callers to list blobs across all containers whose tags match - a given search expression. Filter blobs searches across all containers within a storage - account but can be scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. 
- :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_lease.py b/azure/multiapi/storagev2/blob/v2020_04_08/_lease.py deleted file mode 100644 index d495d6e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_lease.py +++ /dev/null @@ -1,331 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._serialize import get_modify_conditions - -if TYPE_CHECKING: - from datetime import datetime - - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(object): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.BlobClient or - ~azure.storage.blob.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'blob_name'): - self._client = client._client.blob # type: ignore # pylint: disable=protected-access - elif hasattr(client, 'container_name'): - self._client = client._client.container # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use either BlobClient or ContainerClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_04_08/_list_blobs_helper.py deleted file mode 100644 index 309d37b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_list_blobs_helper.py +++ /dev/null @@ -1,236 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.paging import PageIterator, ItemPaged -from azure.core.exceptions import HttpResponseError -from ._deserialize import get_blob_properties_from_generated_code, parse_tags -from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix, FilterBlobItem -from ._models import BlobProperties, FilteredBlob -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - def _extract_data_cb(self, get_next_return): - continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class BlobPrefix(ItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str next_marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. 
- :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class FilteredBlobPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.FilteredBlob) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - tags = parse_tags(item.tags) - blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_models.py b/azure/multiapi/storagev2/blob/v2020_04_08/_models.py deleted file mode 100644 index 1a8237c..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_models.py +++ /dev/null @@ -1,1107 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._generated.models import ArrowField - -from ._shared import decode_base64_to_text -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Logging as GeneratedLogging -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import StaticWebsite as GeneratedStaticWebsite -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy - - -class BlobType(str, Enum): - - BlockBlob = "BlockBlob" - PageBlob = "PageBlob" - AppendBlob = "AppendBlob" - - -class BlockState(str, Enum): - """Block blob block types.""" - - Committed = 'Committed' #: Committed blocks. - Latest = 'Latest' #: Latest blocks. - Uncommitted = 'Uncommitted' #: Uncommitted blocks. - - -class StandardBlobTier(str, Enum): - """ - Specifies the blob tier to set the blob to. This is only applicable for - block blobs on standard storage accounts. - """ - - Archive = 'Archive' #: Archive - Cool = 'Cool' #: Cool - Hot = 'Hot' #: Hot - - -class PremiumPageBlobTier(str, Enum): - """ - Specifies the page blob tier to set the blob to. 
This is only applicable to page - blobs on premium storage accounts. Please take a look at: - https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughput per PageBlobTier. - """ - - P4 = 'P4' #: P4 Tier - P6 = 'P6' #: P6 Tier - P10 = 'P10' #: P10 Tier - P20 = 'P20' #: P20 Tier - P30 = 'P30' #: P30 Tier - P40 = 'P40' #: P40 Tier - P50 = 'P50' #: P50 Tier - P60 = 'P60' #: P60 Tier - - -class SequenceNumberAction(str, Enum): - """Sequence number actions.""" - - Increment = 'increment' - """ - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - """ - - Max = 'max' - """ - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - """ - - Update = 'update' - """Sets the sequence number to the value included with the request.""" - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the container may be accessed publicly and the level of access. - """ - - OFF = 'off' - """ - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - """ - - Blob = 'blob' - """ - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - """ - - Container = 'container' - """ - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - """ - - -class BlobAnalyticsLogging(GeneratedLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Blob service. 
- The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GeneratedStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. - """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. 
- - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ContainerProperties(DictMixin): - """Blob container's properties class. - - Returned ``ContainerProperties`` instances expose these values through a - dictionary interface, for example: ``container_props["last_modified"]``. - Additionally, the container name is available as ``container_props["name"]``. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the container. - :ivar str public_access: Specifies whether data in the container may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the container has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the container has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - container as metadata. - :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope: - The default encryption scope configuration for the container. 
- """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.lease = LeaseProperties(**kwargs) - self.public_access = kwargs.get('x-ms-blob-public-access') - self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') - self.deleted = None - self.version = None - self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') - self.metadata = kwargs.get('metadata') - self.encryption_scope = None - default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') - if default_encryption_scope: - self.encryption_scope = ContainerEncryptionScope( - default_encryption_scope=default_encryption_scope, - prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) - ) - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = generated.properties.public_access - props.has_immutability_policy = generated.properties.has_immutability_policy - props.deleted = generated.deleted - props.version = generated.version - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access - return props - - -class ContainerPropertiesPaged(PageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class BlobProperties(DictMixin): - """ - Blob Properties. - - :ivar str name: - The name of the blob. - :ivar str container: - The container in which the blob resides. - :ivar str snapshot: - Datetime value that uniquely identifies the blob snapshot. - :ivar ~azure.blob.storage.BlobType blob_type: - String indicating this blob's type. - :ivar dict metadata: - Name-value pairs associated with the blob as metadata. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - The size of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar bool is_append_blob_sealed: - Indicate if the append blob is sealed or not. - - .. versionadded:: 12.4.0 - - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar ~azure.storage.blob.StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. 
- :ivar str rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :ivar ~datetime.datetime blob_tier_change_time: - Indicates when the access tier was last changed. - :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar bool deleted: - Whether this blob was deleted. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - :ivar ~datetime.datetime creation_time: - Indicates when the blob was created, in UTC. - :ivar str archive_status: - Archive status of blob. - :ivar str encryption_key_sha256: - The SHA-256 hash of the provided encryption key. - :ivar str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :ivar bool request_server_encrypted: - Whether this blob is encrypted. - :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: - Only present for blobs that have policy ids and rule ids applied to them. - - .. versionadded:: 12.4.0 - - :ivar str object_replication_destination_policy: - Represents the Object Replication Policy Id that created this blob. - - .. versionadded:: 12.4.0 - - :ivar ~datetime.datetime last_accessed_on: - Indicates when the last Read/Write operation was performed on a Blob. - - .. versionadded:: 12.6.0 - - :ivar int tag_count: - Tags count on this blob. - - .. versionadded:: 12.4.0 - - :ivar dict(str, str) tags: - Key value pair of tags on this blob. - - .. 
versionadded:: 12.4.0 - - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.container = None - self.snapshot = kwargs.get('x-ms-snapshot') - self.version_id = kwargs.get('x-ms-version-id') - self.is_current_version = kwargs.get('x-ms-is-current-version') - self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None - self.metadata = kwargs.get('metadata') - self.encrypted_metadata = kwargs.get('encrypted_metadata') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') - self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') - self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.blob_tier = kwargs.get('x-ms-access-tier') - self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') - self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') - self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') - self.deleted = False - self.deleted_time = None - self.remaining_retention_days = None - self.creation_time = kwargs.get('x-ms-creation-time') - self.archive_status = kwargs.get('x-ms-archive-status') - self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') - self.encryption_scope = kwargs.get('x-ms-encryption-scope') - self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') - self.object_replication_source_properties = kwargs.get('object_replication_source_properties') - self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') - self.last_accessed_on = kwargs.get('x-ms-last-access-time') - self.tag_count = kwargs.get('x-ms-tag-count') - self.tags = None - - -class FilteredBlob(DictMixin): - """Blob info from a Filter Blobs API call. - - :ivar name: Blob name - :type name: str - :ivar container_name: Container name. - :type container_name: str - :ivar tags: Key value pairs of blob tags. - :type tags: Dict[str, str] - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name', None) - self.container_name = kwargs.get('container_name', None) - self.tags = kwargs.get('tags', None) - - -class LeaseProperties(DictMixin): - """Blob Lease Properties. - - :ivar str status: - The lease status of the blob. Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """The content settings of a blob. - - :param str content_type: - The content type specified for the blob. 
If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :param str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :param str content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class CopyProperties(DictMixin): - """Blob Copy Properties. - - These properties will be `None` if this blob has never been the destination - in a Copy Blob operation, or if this blob has been modified after a concluded - Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar ~datetime.datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. 
- :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar ~datetime.datetime destination_snapshot: - Included if the blob is incremental copy blob or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this blob. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class BlobBlock(DictMixin): - """BlockBlob Block class. - - :param str block_id: - Block id. - :param str state: - Block state. Possible values: committed|uncommitted - :ivar int size: - Block size in bytes. - """ - - def __init__(self, block_id, state=BlockState.Latest): - self.id = block_id - self.state = state - self.size = None - - @classmethod - def _from_generated(cls, generated): - block = cls(decode_base64_to_text(generated.name)) - block.size = generated.size - return block - - -class PageRange(DictMixin): - """Page Range for page blob. - - :param int start: - Start of page range in bytes. - :param int end: - End of page range in bytes. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. 
- - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class ContainerSasPermissions(object): - """ContainerSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_container_sas` function and - for the AccessPolicies used with - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - List blobs in the container. - :param bool tag: - Set or get tags on the blobs in the container. 
- """ - def __init__(self, read=False, write=False, delete=False, list=False, delete_previous_version=False, tag=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self.delete_previous_version = delete_previous_version - self.tag = tag - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('t' if self.tag else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ContainerSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, delete, - and list permissions. - :return: A ContainerSasPermissions object - :rtype: ~azure.storage.blob.ContainerSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, - delete_previous_version=p_delete_previous_version, tag=p_tag) - - return parsed - - -class BlobSasPermissions(object): - """BlobSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_blob_sas` function. - - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool tag: - Set or get tags on the blob. - """ - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, delete_previous_version=False, tag=True): - self.read = read - self.add = add - self.create = create - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.tag = tag - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('t' if self.tag else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a BlobSasPermissions from a string. - - To specify read, add, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A BlobSasPermissions object - :rtype: ~azure.storage.blob.BlobSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - - parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, - delete_previous_version=p_delete_previous_version, tag=p_tag) - - return parsed - - -class CustomerProvidedEncryptionKey(object): - """ - All data in Azure Storage is encrypted at-rest using an account-level encryption key. - In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents - and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. - - When you use a customer-provided key, Azure Storage does not manage or persist your key. - When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. - A SHA-256 hash of the encryption key is written alongside the blob contents, - and is used to verify that all subsequent operations against the blob use the same encryption key. - This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. - When reading a blob, the provided key is used to decrypt your data after reading it from disk. - In both cases, the provided encryption key is securely discarded - as soon as the encryption or decryption process completes. - - :param str key_value: - Base64-encoded AES-256 encryption key value. - :param str key_hash: - Base64-encoded SHA256 of the encryption key. - :ivar str algorithm: - Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - """ - def __init__(self, key_value, key_hash): - self.key_value = key_value - self.key_hash = key_hash - self.algorithm = 'AES256' - - -class ContainerEncryptionScope(object): - """The default encryption scope configuration for a container. - - This scope is used implicitly for all future writes within the container, - but can be overridden per blob operation. - - .. versionadded:: 12.2.0 - - :param str default_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - :param bool prevent_encryption_scope_override: - If true, prevents any request from specifying a different encryption scope than the scope - set on the container. Default value is false. - """ - - def __init__(self, default_encryption_scope, **kwargs): - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) - - @classmethod - def _from_generated(cls, generated): - if generated.properties.default_encryption_scope: - scope = cls( - generated.properties.default_encryption_scope, - prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False - ) - return scope - return None - - -class DelimitedJsonDialect(object): - """Defines the input or output JSON serialization for a blob data query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', '\n') - - -class DelimitedTextDialect(object): - """Defines the input or output delimited (CSV) serialization for a blob query request. - - :keyword str delimiter: - Column separator, defaults to ','. 
- :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', ',') - self.quotechar = kwargs.pop('quotechar', '"') - self.lineterminator = kwargs.pop('lineterminator', '\n') - self.escapechar = kwargs.pop('escapechar', "") - self.has_header = kwargs.pop('has_header', False) - - -class ArrowDialect(ArrowField): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param ~azure.storage.blob.ArrowType type: Arrow field type. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. - """ - def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin - super(ArrowDialect, self).__init__(type=type, **kwargs) - - -class ArrowType(str, Enum): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class ObjectReplicationPolicy(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str policy_id: - Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. - :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: - Within each policy there may be multiple replication rules. - e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 - """ - - def __init__(self, **kwargs): - self.policy_id = kwargs.pop('policy_id', None) - self.rules = kwargs.pop('rules', None) - - -class ObjectReplicationRule(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str rule_id: - Rule id. - :ivar str status: - The status of the rule. It could be "Complete" or "Failed" - """ - - def __init__(self, **kwargs): - self.rule_id = kwargs.pop('rule_id', None) - self.status = kwargs.pop('status', None) - - -class BlobQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2020_04_08/_quick_query_helper.py deleted file mode 100644 index eb51d98..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_quick_query_helper.py +++ /dev/null @@ -1,196 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from io import BytesIO -from typing import Union, Iterable, IO # pylint: disable=unused-import - -from ._shared.avro.datafile import DataFileReader -from ._shared.avro.avro_io import DatumReader - - -class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. - """ - - def __init__( - self, - name=None, - container=None, - errors=None, - record_delimiter='\n', - encoding=None, - headers=None, - response=None, - error_cls=None, - ): - self.name = name - self.container = container - self.response_headers = headers - self.record_delimiter = record_delimiter - self._size = 0 - self._bytes_processed = 0 - self._errors = errors - self._encoding = encoding - self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) - self._first_result = self._process_record(next(self._parsed_results)) - self._error_cls = error_cls - - def __len__(self): - return self._size - - def _process_record(self, result): - self._size = result.get('totalBytes', self._size) - self._bytes_processed = result.get('bytesScanned', self._bytes_processed) - if 'data' in result: - return result.get('data') - if 'fatal' in result: - error = self._error_cls( - error=result['name'], - is_fatal=result['fatal'], - description=result['description'], - position=result['position'] - ) - if self._errors: - self._errors(error) - return None - - def _iter_stream(self): - if self._first_result is not None: - yield self._first_result - for next_result in self._parsed_results: - processed_result = self._process_record(next_result) - if processed_result is not None: - yield processed_result - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - for record in self._iter_stream(): - stream.write(record) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - delimiter = self.record_delimiter.encode('utf-8') - for record_chunk in self._iter_stream(): - for record in record_chunk.split(delimiter): - if self._encoding: - yield record.decode(self._encoding) - else: - yield record - - - -class QuickQueryStreamer(object): - """ - File-like streaming iterator. 
- """ - - def __init__(self, generator): - self.generator = generator - self.iterator = iter(generator) - self._buf = b"" - self._point = 0 - self._download_offset = 0 - self._buf_start = 0 - self.file_length = None - - def __len__(self): - return self.file_length - - def __iter__(self): - return self.iterator - - @staticmethod - def seekable(): - return True - - def __next__(self): - next_part = next(self.iterator) - self._download_offset += len(next_part) - return next_part - - next = __next__ # Python 2 compatibility. - - def tell(self): - return self._point - - def seek(self, offset, whence=0): - if whence == 0: - self._point = offset - elif whence == 1: - self._point += offset - else: - raise ValueError("whence must be 0, or 1") - if self._point < 0: - self._point = 0 # XXX is this right? - - def read(self, size): - try: - # keep reading from the generator until the buffer of this stream has enough data to read - while self._point + size > self._download_offset: - self._buf += self.__next__() - except StopIteration: - self.file_length = self._download_offset - - start_point = self._point - - # EOF - self._point = min(self._point + size, self._download_offset) - - relative_start = start_point - self._buf_start - if relative_start < 0: - raise ValueError("Buffer has dumped too much data") - relative_end = relative_start + size - data = self._buf[relative_start: relative_end] - - # dump the extra data in buffer - # buffer start--------------------16bytes----current read position - dumped_size = max(relative_end - 16 - relative_start, 0) - self._buf_start += dumped_size - self._buf = self._buf[dumped_size:] - - return data diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_serialize.py b/azure/multiapi/storagev2/blob/v2020_04_08/_serialize.py deleted file mode 100644 index cb576c2..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_serialize.py +++ /dev/null @@ -1,197 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -try: - from urllib.parse import quote -except ImportError: - from urllib2 import quote # type: ignore - -from azure.core import MatchConditions - -from ._models import ( - ContainerEncryptionScope, - DelimitedJsonDialect) -from ._generated.models import ( - ModifiedAccessConditions, - SourceModifiedAccessConditions, - CpkScopeInfo, - ContainerCpkScopeInfo, - QueryFormat, - QuerySerialization, - DelimitedTextConfiguration, - JsonTextConfiguration, - ArrowConfiguration, - QueryFormatType, - BlobTag, - BlobTags, LeaseAccessConditions -) - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08' -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if kwargs.get(etag_param): - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_modify_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None), - if_tags=kwargs.pop('if_tags_match_condition', None) - ) - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), - source_if_tags=kwargs.pop('source_if_tags_match_condition', None) - ) - - -def get_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> CpkScopeInfo - if 'encryption_scope' in kwargs: - return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) - return None - - -def get_container_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> ContainerCpkScopeInfo - 
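# A small illustrative sketch of the mapping _get_match_headers() applies above:
# the public match_condition/etag keyword pair becomes the conditional request
# headers If-Match / If-None-Match. describe_condition() is an assumed helper for
# demonstration only, not part of the module.
from azure.core import MatchConditions


def describe_condition(match_condition, etag=None):
    """Return the (If-Match, If-None-Match) pair implied by a MatchConditions value."""
    if match_condition == MatchConditions.IfNotModified:
        return etag, None     # succeed only if the resource still has this ETag
    if match_condition == MatchConditions.IfPresent:
        return '*', None      # succeed only if the resource exists
    if match_condition == MatchConditions.IfModified:
        return None, etag     # succeed only if the resource no longer has this ETag
    if match_condition == MatchConditions.IfMissing:
        return None, '*'      # succeed only if the resource does not exist
    return None, None


assert describe_condition(MatchConditions.IfNotModified, etag='"etag-value"') == ('"etag-value"', None)
assert describe_condition(MatchConditions.IfMissing) == (None, '*')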
encryption_scope = kwargs.pop('container_encryption_scope', None) - if encryption_scope: - if isinstance(encryption_scope, ContainerEncryptionScope): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope.default_encryption_scope, - prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override - ) - if isinstance(encryption_scope, dict): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope['default_encryption_scope'], - prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') - ) - raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") - return None - - -def get_api_version(kwargs, default): - # type: (Dict[str, Any]) -> str - api_version = kwargs.pop('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or default - - -def serialize_blob_tags_header(tags=None): - # type: (Optional[Dict[str, str]]) -> str - if tags is None: - return None - - components = list() - if tags: - for key, value in tags.items(): - components.append(quote(key, safe='.-')) - components.append('=') - components.append(quote(value, safe='.-')) - components.append('&') - - if components: - del components[-1] - - return ''.join(components) - - -def serialize_blob_tags(tags=None): - # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] - tag_list = list() - if tags: - tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] - return BlobTags(blob_tag_set=tag_list) - - -def serialize_query_format(formater): - if isinstance(formater, DelimitedJsonDialect): - serialization_settings = JsonTextConfiguration( - record_separator=formater.delimiter - ) - qq_format = QueryFormat( - type=QueryFormatType.json, - json_text_configuration=serialization_settings) - elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well - try: - headers = formater.has_header - except AttributeError: - headers = False - serialization_settings = DelimitedTextConfiguration( - column_separator=formater.delimiter, - field_quote=formater.quotechar, - record_separator=formater.lineterminator, - escape_char=formater.escapechar, - headers_present=headers - ) - qq_format = QueryFormat( - type=QueryFormatType.delimited, - delimited_text_configuration=serialization_settings - ) - elif isinstance(formater, list): - serialization_settings = ArrowConfiguration( - schema=formater - ) - qq_format = QueryFormat( - type=QueryFormatType.arrow, - arrow_configuration=serialization_settings) - elif not formater: - return None - else: - raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect.") - return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
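# sign_string() above is the core of SharedKey authentication: HMAC-SHA256 of the
# canonical string-to-sign, keyed with the base64-decoded account key and re-encoded
# as base64. A self-contained sketch with made-up inputs (the real string-to-sign is
# assembled from the verb, standard headers, x-ms-* headers and the canonicalized
# resource, as in on_request() further down):
import base64
import hashlib
import hmac


def shared_key_signature(account_key_b64, string_to_sign):
    key = base64.b64decode(account_key_b64)
    digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')


fake_key = base64.b64encode(b'not-a-real-account-key').decode('utf-8')
signature = shared_key_signature(fake_key, 'GET\n\nx-ms-date:Mon, 01 Jan 2024 00:00:00 GMT\n/myaccount/mycontainer')
print('SharedKey myaccount:' + signature)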
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/__init__.py deleted file mode 100644 index 5b396cd..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/avro_io.py deleted file mode 100644 index 93a5c13..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/avro_io.py +++ /dev/null @@ -1,464 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import json -import logging -import struct -import sys - -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -STRUCT_FLOAT = struct.Struct('= 0), n - input_bytes = self.reader.read(n) - if n > 0 and not input_bytes: - raise StopIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return self.read_long() - - def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - b = ord(self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(self.read(4))[0] - - def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(self.read(8))[0] - - def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = self.read_long() - assert (nbytes >= 0), nbytes - return self.read(nbytes) - - def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. 
- """ - input_bytes = self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - def skip_boolean(self): - self.skip(1) - - def skip_int(self): - self.skip_long() - - def skip_long(self): - b = ord(self.read(1)) - while (b & 0x80) != 0: - b = ord(self.read(1)) - - def skip_float(self): - self.skip(4) - - def skip_double(self): - self.skip(8) - - def skip_bytes(self): - self.skip(self.read_long()) - - def skip_utf8(self): - self.skip_bytes() - - def skip(self, n): - self.reader.seek(self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class DatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema". - """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - def read(self, decoder): - return self.read_data(self.writer_schema, decoder) - - def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = decoder.read_boolean() - elif writer_schema.type == 'string': - result = decoder.read_utf8() - elif writer_schema.type == 'int': - result = decoder.read_int() - elif writer_schema.type == 'long': - result = decoder.read_long() - elif writer_schema.type == 'float': - result = decoder.read_float() - elif writer_schema.type == 'double': - result = decoder.read_double() - elif writer_schema.type == 'bytes': - result = decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = decoder.skip_boolean() - elif writer_schema.type == 'string': - result = decoder.skip_utf8() - elif writer_schema.type == 'int': - result = decoder.skip_int() - elif writer_schema.type == 'long': - result = decoder.skip_long() - elif writer_schema.type == 'float': - result = decoder.skip_float() - elif writer_schema.type == 'double': - result = decoder.skip_double() - elif writer_schema.type == 'bytes': - result = decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - 
result = self.skip_enum(decoder) - elif writer_schema.type == 'array': - self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. - """ - return decoder.read(writer_schema.size) - - @staticmethod - def skip_fixed(writer_schema, decoder): - return decoder.skip(writer_schema.size) - - @staticmethod - def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - def skip_enum(decoder): - return decoder.skip_int() - - def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - read_items.append(self.read_data(writer_schema.items, decoder)) - block_count = decoder.read_long() - return read_items - - def skip_array(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - self.skip_data(writer_schema.items, decoder) - block_count = decoder.read_long() - - def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. 
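# A self-contained sketch of the block framing described in the docstrings above:
# an Avro array is a sequence of blocks, each a long item count followed by that
# many items; a negative count is followed by a byte size (so a reader can skip the
# block wholesale), and a count of zero terminates the array. Helper names are
# illustrative, not the module's API.
import io


def _read_varint_long(buf):
    # variable-length zig-zag long, the encoding used for every Avro int/long
    b = buf.read(1)[0]
    n = b & 0x7F
    shift = 7
    while b & 0x80:
        b = buf.read(1)[0]
        n |= (b & 0x7F) << shift
        shift += 7
    return (n >> 1) ^ -(n & 1)


def read_long_array(buf):
    items = []
    block_count = _read_varint_long(buf)
    while block_count != 0:
        if block_count < 0:
            block_count = -block_count
            _read_varint_long(buf)   # block size in bytes; only needed when skipping
        for _ in range(block_count):
            items.append(_read_varint_long(buf))
        block_count = _read_varint_long(buf)
    return items


# [1, 2, 3] as a single block: count 3, the three items, then the 0 terminator.
assert read_long_array(io.BytesIO(bytes([0x06, 0x02, 0x04, 0x06, 0x00]))) == [1, 2, 3]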
- """ - read_items = {} - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - key = decoder.read_utf8() - read_items[key] = self.read_data(writer_schema.values, decoder) - block_count = decoder.read_long() - return read_items - - def skip_map(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - decoder.skip_utf8() - self.skip_data(writer_schema.values, decoder) - block_count = decoder.read_long() - - def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. - """ - # schema resolution - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return self.read_data(selected_writer_schema, decoder) - - def skip_union(self, writer_schema, decoder): - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. 
- """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/avro_io_async.py deleted file mode 100644 index e981216..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/avro_io_async.py +++ /dev/null @@ -1,448 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import logging -import sys - -from ..avro import schema - -from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Decoder - - -class AsyncBinaryDecoder(object): - """Read leaf values.""" - - def __init__(self, reader): - """ - reader is a Python object on which we can call read, seek, and tell. - """ - self._reader = reader - - @property - def reader(self): - """Reports the reader used by this decoder.""" - return self._reader - - async def read(self, n): - """Read n bytes. - - Args: - n: Number of bytes to read. - Returns: - The next n bytes from the input. - """ - assert (n >= 0), n - input_bytes = await self.reader.read(n) - if n > 0 and not input_bytes: - raise StopAsyncIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - async def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(await self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - async def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return await self.read_long() - - async def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. 
- """ - b = ord(await self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(await self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - async def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(await self.read(4))[0] - - async def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(await self.read(8))[0] - - async def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = await self.read_long() - assert (nbytes >= 0), nbytes - return await self.read(nbytes) - - async def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. - """ - input_bytes = await self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - async def skip_boolean(self): - await self.skip(1) - - async def skip_int(self): - await self.skip_long() - - async def skip_long(self): - b = ord(await self.read(1)) - while (b & 0x80) != 0: - b = ord(await self.read(1)) - - async def skip_float(self): - await self.skip(4) - - async def skip_double(self): - await self.skip(8) - - async def skip_bytes(self): - await self.skip(await self.read_long()) - - async def skip_utf8(self): - await self.skip_bytes() - - async def skip(self, n): - await self.reader.seek(await self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class AsyncDatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema", and the schema expected by the - reader the "reader's schema". 
- """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - async def read(self, decoder): - return await self.read_data(self.writer_schema, decoder) - - async def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = await decoder.read_boolean() - elif writer_schema.type == 'string': - result = await decoder.read_utf8() - elif writer_schema.type == 'int': - result = await decoder.read_int() - elif writer_schema.type == 'long': - result = await decoder.read_long() - elif writer_schema.type == 'float': - result = await decoder.read_float() - elif writer_schema.type == 'double': - result = await decoder.read_double() - elif writer_schema.type == 'bytes': - result = await decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = await self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = await self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = await self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = await self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = await self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - async def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = await decoder.skip_boolean() - elif writer_schema.type == 'string': - result = await decoder.skip_utf8() - elif writer_schema.type == 'int': - result = await decoder.skip_int() - elif writer_schema.type == 'long': - result = await decoder.skip_long() - elif writer_schema.type == 'float': - result = await decoder.skip_float() - elif writer_schema.type == 'double': - result = await decoder.skip_double() - elif writer_schema.type == 'bytes': - result = await decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = await self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.skip_enum(decoder) - elif writer_schema.type == 'array': - await self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - await self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = await self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - await self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - async def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. 
- """ - return await decoder.read(writer_schema.size) - - @staticmethod - async def skip_fixed(writer_schema, decoder): - return await decoder.skip(writer_schema.size) - - @staticmethod - async def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = await decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - async def skip_enum(decoder): - return await decoder.skip_int() - - async def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - read_items.append(await self.read_data(writer_schema.items, decoder)) - block_count = await decoder.read_long() - return read_items - - async def skip_array(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await self.skip_data(writer_schema.items, decoder) - block_count = await decoder.read_long() - - async def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = {} - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - key = await decoder.read_utf8() - read_items[key] = await self.read_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - return read_items - - async def skip_map(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await decoder.skip_utf8() - await self.skip_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - - async def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. 
- """ - # schema resolution - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return await self.read_data(selected_writer_schema, decoder) - - async def skip_union(self, writer_schema, decoder): - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - async def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. - """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = await self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - async def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - await self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/datafile.py deleted file mode 100644 index df06fe0..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/datafile.py +++ /dev/null @@ -1,266 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import io -import logging -import sys -import zlib - -from ..avro import avro_io -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Version of the container file: -VERSION = 1 - -if PY3: - MAGIC = b'Obj' + bytes([VERSION]) - MAGIC_SIZE = len(MAGIC) -else: - MAGIC = 'Obj' + chr(VERSION) - MAGIC_SIZE = len(MAGIC) - -# Size of the synchronization marker, in number of bytes: -SYNC_SIZE = 16 - -# Schema of the container header: -META_SCHEMA = schema.parse(""" -{ - "type": "record", "name": "org.apache.avro.file.Header", - "fields": [{ - "name": "magic", - "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} - }, { - "name": "meta", - "type": {"type": "map", "values": "bytes"} - }, { - "name": "sync", - "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} - }] -} -""" % { - 'magic_size': MAGIC_SIZE, - 'sync_size': SYNC_SIZE, -}) - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null', 'deflate']) - -# Metadata key associated to the schema: -SCHEMA_KEY = "avro.schema" - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class DataFileException(schema.AvroException): - """Problem reading or writing file object containers.""" - -# ------------------------------------------------------------------------------ - - -class DataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io.BinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - self._reader.seek(0, 0) - - # read the header: magic, meta, sync - self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' % self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - self._cur_object_index = 0 - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. 
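# The constants above pin down the object-container layout that _read_header()
# expects: a 4-byte magic ("Obj" plus the version byte 1), a metadata map whose
# avro.schema / avro.codec entries drive decoding, and a 16-byte sync marker that
# also closes every block. A minimal sniff test on an arbitrary stream (assumed
# helper, not the module's API):
import io

AVRO_MAGIC = b'Obj\x01'


def looks_like_avro_container(stream):
    stream.seek(0, 0)
    return stream.read(len(AVRO_MAGIC)) == AVRO_MAGIC


assert looks_like_avro_container(io.BytesIO(b'Obj\x01' + b'\x00' * 32))
assert not looks_like_avro_container(io.BytesIO(b'PK\x03\x04'))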
- if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - - def __enter__(self): - return self - - def __exit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __iter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - header_reader.seek(0, 0) - - # read header into a dict - header = self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - def _read_block_header(self): - self._block_count = self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - elif self.codec == 'deflate': - # Compressed data is stored as (length, data), which - # corresponds to how the "bytes" type is encoded. - data = self.raw_decoder.read_bytes() - # -15 is the log of the window size; negative indicates - # "raw" (no zlib headers) decompression. See zlib.h. - uncompressed = zlib.decompress(data, -15) - self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopIteration - if proposed_sync_marker != self.sync_marker: - self.reader.seek(-SYNC_SIZE, 1) - - def __next__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. 
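# _read_block_header() above inflates "deflate"-codec blocks with
# zlib.decompress(data, -15); the negative wbits selects raw DEFLATE data carrying
# no zlib header or checksum. A standard-library round trip for reference:
import zlib

payload = b'avro block payload ' * 4

compressor = zlib.compressobj(9, zlib.DEFLATED, -15)   # -15: raw deflate stream
raw_deflate = compressor.compress(payload) + compressor.flush()

assert zlib.decompress(raw_deflate, -15) == payload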
- if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - self._cur_object_index = 0 - - self._read_block_header() - - datum = self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - self.reader.track_object_position() - self.reader.set_object_index(0) - else: - self.reader.set_object_index(self._cur_object_index) - - return datum - - # PY2 - def next(self): - return self.__next__() - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/datafile_async.py deleted file mode 100644 index 1e9d018..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/datafile_async.py +++ /dev/null @@ -1,215 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import logging -import sys - -from ..avro import avro_io_async -from ..avro import schema -from .datafile import DataFileException -from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY - - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null']) - - -class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else \ - avro_io_async.AsyncBinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - self.codec = "null" - self._block_count = 0 - self._cur_object_index = 0 - self._meta = None - self._sync_marker = None - - async def init(self): - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - await self._reader.seek(0, 0) - - # read the header: magic, meta, sync - await self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' 
% self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. - if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - return self - - async def __aenter__(self): - return self - - async def __aexit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __aiter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - async def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - await header_reader.seek(0, 0) - - # read header into a dict - header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - async def _read_block_header(self): - self._block_count = await self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - await self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - async def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = await self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopAsyncIteration - if proposed_sync_marker != self.sync_marker: - await self.reader.seek(-SYNC_SIZE, 1) - - async def __anext__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - await self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. 
- if hasattr(self._reader, 'object_position'): - await self.reader.track_object_position() - self._cur_object_index = 0 - - await self._read_block_header() - - datum = await self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - await self.reader.track_object_position() - await self.reader.set_object_index(0) - else: - await self.reader.set_object_index(self._cur_object_index) - - return datum - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/schema.py deleted file mode 100644 index ffe2853..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/avro/schema.py +++ /dev/null @@ -1,1221 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines - -"""Representation of Avro schemas. - -A schema may be one of: - - A record, mapping field names to field value data; - - An error, equivalent to a record; - - An enum, containing one of a small set of symbols; - - An array of values, all of the same schema; - - A map containing string/value pairs, each of a declared schema; - - A union of other schemas; - - A fixed sized binary object; - - A unicode string; - - A sequence of bytes; - - A 32-bit signed int; - - A 64-bit signed long; - - A 32-bit floating-point float; - - A 64-bit floating-point double; - - A boolean; - - Null. -""" - -import abc -import json -import logging -import re -import sys -from six import with_metaclass - -PY2 = sys.version_info[0] == 2 - -if PY2: - _str = unicode # pylint: disable=undefined-variable -else: - _str = str - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Log level more verbose than DEBUG=10, INFO=20, etc. 
-DEBUG_VERBOSE = 5 - -NULL = 'null' -BOOLEAN = 'boolean' -STRING = 'string' -BYTES = 'bytes' -INT = 'int' -LONG = 'long' -FLOAT = 'float' -DOUBLE = 'double' -FIXED = 'fixed' -ENUM = 'enum' -RECORD = 'record' -ERROR = 'error' -ARRAY = 'array' -MAP = 'map' -UNION = 'union' - -# Request and error unions are part of Avro protocols: -REQUEST = 'request' -ERROR_UNION = 'error_union' - -PRIMITIVE_TYPES = frozenset([ - NULL, - BOOLEAN, - STRING, - BYTES, - INT, - LONG, - FLOAT, - DOUBLE, -]) - -NAMED_TYPES = frozenset([ - FIXED, - ENUM, - RECORD, - ERROR, -]) - -VALID_TYPES = frozenset.union( - PRIMITIVE_TYPES, - NAMED_TYPES, - [ - ARRAY, - MAP, - UNION, - REQUEST, - ERROR_UNION, - ], -) - -SCHEMA_RESERVED_PROPS = frozenset([ - 'type', - 'name', - 'namespace', - 'fields', # Record - 'items', # Array - 'size', # Fixed - 'symbols', # Enum - 'values', # Map - 'doc', -]) - -FIELD_RESERVED_PROPS = frozenset([ - 'default', - 'name', - 'doc', - 'order', - 'type', -]) - -VALID_FIELD_SORT_ORDERS = frozenset([ - 'ascending', - 'descending', - 'ignore', -]) - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class Error(Exception): - """Base class for errors in this module.""" - - -class AvroException(Error): - """Generic Avro schema error.""" - - -class SchemaParseException(AvroException): - """Error while parsing a JSON schema descriptor.""" - - -class Schema(with_metaclass(abc.ABCMeta, object)): - """Abstract base class for all Schema classes.""" - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object. - - Args: - data_type: Type of the schema to initialize. - other_props: Optional dictionary of additional properties. - """ - if data_type not in VALID_TYPES: - raise SchemaParseException('%r is not a valid Avro type.' % data_type) - - # All properties of this schema, as a map: property name -> property value - self._props = {} - - self._props['type'] = data_type - self._type = data_type - - if other_props: - self._props.update(other_props) - - @property - def namespace(self): - """Returns: the namespace this schema belongs to, if any, or None.""" - return self._props.get('namespace', None) - - @property - def type(self): - """Returns: the type of this schema.""" - return self._type - - @property - def doc(self): - """Returns: the documentation associated to this schema, if any, or None.""" - return self._props.get('doc', None) - - @property - def props(self): - """Reports all the properties of this schema. - - Includes all properties, reserved and non reserved. - JSON properties of this schema are directly generated from this dict. - - Returns: - A dictionary of properties associated to this schema. - """ - return self._props - - @property - def other_props(self): - """Returns: the dictionary of non-reserved properties.""" - return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) - - def __str__(self): - """Returns: the JSON representation of this schema.""" - return json.dumps(self.to_json(names=None)) - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - - -# ------------------------------------------------------------------------------ - - -_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') - -_RE_FULL_NAME = re.compile( - r'^' - r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace - r'([A-Za-z_][A-Za-z0-9_]*)' # name - r'$' -) - - -class Name(object): - """Representation of an Avro name.""" - - def __init__(self, name, namespace=None): - """Parses an Avro name. - - Args: - name: Avro name to parse (relative or absolute). - namespace: Optional explicit namespace if the name is relative. - """ - # Normalize: namespace is always defined as a string, possibly empty. - if namespace is None: - namespace = '' - - if '.' in name: - # name is absolute, namespace is ignored: - self._fullname = name - - match = _RE_FULL_NAME.match(self._fullname) - if match is None: - raise SchemaParseException( - 'Invalid absolute schema name: %r.' % self._fullname) - - self._name = match.group(1) - self._namespace = self._fullname[:-(len(self._name) + 1)] - - else: - # name is relative, combine with explicit namespace: - self._name = name - self._namespace = namespace - self._fullname = (self._name - if (not self._namespace) else - '%s.%s' % (self._namespace, self._name)) - - # Validate the fullname: - if _RE_FULL_NAME.match(self._fullname) is None: - raise SchemaParseException( - 'Invalid schema name %r infered from name %r and namespace %r.' - % (self._fullname, self._name, self._namespace)) - - def __eq__(self, other): - if not isinstance(other, Name): - return NotImplemented - return self.fullname == other.fullname - - @property - def simple_name(self): - """Returns: the simple name part of this name.""" - return self._name - - @property - def namespace(self): - """Returns: this name's namespace, possible the empty string.""" - return self._namespace - - @property - def fullname(self): - """Returns: the full name.""" - return self._fullname - - -# ------------------------------------------------------------------------------ - - -class Names(object): - """Tracks Avro named schemas and default namespace during parsing.""" - - def __init__(self, default_namespace=None, names=None): - """Initializes a new name tracker. - - Args: - default_namespace: Optional default namespace. - names: Optional initial mapping of known named schemas. - """ - if names is None: - names = {} - self._names = names - self._default_namespace = default_namespace - - @property - def names(self): - """Returns: the mapping of known named schemas.""" - return self._names - - @property - def default_namespace(self): - """Returns: the default namespace, if any, or None.""" - return self._default_namespace - - def new_with_default_namespace(self, namespace): - """Creates a new name tracker from this tracker, but with a new default ns. - - Args: - namespace: New default namespace to use. - Returns: - New name tracker with the specified default namespace. - """ - return Names(names=self._names, default_namespace=namespace) - - def get_name(self, name, namespace=None): - """Resolves the Avro name according to this name tracker's state. - - Args: - name: Name to resolve (absolute or relative). - namespace: Optional explicit namespace. - Returns: - The specified name, resolved according to this tracker. - """ - if namespace is None: - namespace = self._default_namespace - return Name(name=name, namespace=namespace) - - def get_schema(self, name, namespace=None): - """Resolves an Avro schema by name. 
- - Args: - name: Name (relative or absolute) of the Avro schema to look up. - namespace: Optional explicit namespace. - Returns: - The schema with the specified name, if any, or None. - """ - avro_name = self.get_name(name=name, namespace=namespace) - return self._names.get(avro_name.fullname, None) - - def prune_namespace(self, properties): - """given a properties, return properties with namespace removed if - it matches the own default namespace - """ - if self.default_namespace is None: - # I have no default -- no change - return properties - if 'namespace' not in properties: - # he has no namespace - no change - return properties - if properties['namespace'] != self.default_namespace: - # we're different - leave his stuff alone - return properties - # we each have a namespace and it's redundant. delete his. - prunable = properties.copy() - del prunable['namespace'] - return prunable - - def register(self, schema): - """Registers a new named schema in this tracker. - - Args: - schema: Named Avro schema to register in this tracker. - """ - if schema.fullname in VALID_TYPES: - raise SchemaParseException( - '%s is a reserved type name.' % schema.fullname) - if schema.fullname in self.names: - raise SchemaParseException( - 'Avro name %r already exists.' % schema.fullname) - - logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname) - self._names[schema.fullname] = schema - - -# ------------------------------------------------------------------------------ - - -class NamedSchema(Schema): - """Abstract base class for named schemas. - - Named schemas are enumerated in NAMED_TYPES. - """ - - def __init__( - self, - data_type, - name=None, - namespace=None, - names=None, - other_props=None, - ): - """Initializes a new named schema object. - - Args: - data_type: Type of the named schema. - name: Name (absolute or relative) of the schema. - namespace: Optional explicit namespace if name is relative. - names: Tracker to resolve and register Avro names. - other_props: Optional map of additional properties of the schema. - """ - assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type) - self._avro_name = names.get_name(name=name, namespace=namespace) - - super(NamedSchema, self).__init__(data_type, other_props) - - names.register(self) - - self._props['name'] = self.name - if self.namespace: - self._props['namespace'] = self.namespace - - @property - def avro_name(self): - """Returns: the Name object describing this schema's name.""" - return self._avro_name - - @property - def name(self): - return self._avro_name.simple_name - - @property - def namespace(self): - return self._avro_name.namespace - - @property - def fullname(self): - return self._avro_name.fullname - - def name_ref(self, names): - """Reports this schema name relative to the specified name tracker. - - Args: - names: Avro name tracker to relativise this schema name against. - Returns: - This schema name, relativised against the specified name tracker. - """ - if self.namespace == names.default_namespace: - return self.name - return self.fullname - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - -# ------------------------------------------------------------------------------ - - -_NO_DEFAULT = object() - - -class Field(object): - """Representation of the schema of a field in a record.""" - - def __init__( - self, - data_type, - name, - index, - has_default, - default=_NO_DEFAULT, - order=None, - doc=None, - other_props=None - ): - """Initializes a new Field object. - - Args: - data_type: Avro schema of the field. - name: Name of the field. - index: 0-based position of the field. - has_default: - default: - order: - doc: - other_props: - """ - if (not isinstance(name, _str)) or (not name): - raise SchemaParseException('Invalid record field name: %r.' % name) - if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): - raise SchemaParseException('Invalid record field order: %r.' % order) - - # All properties of this record field: - self._props = {} - - self._has_default = has_default - if other_props: - self._props.update(other_props) - - self._index = index - self._type = self._props['type'] = data_type - self._name = self._props['name'] = name - - if has_default: - self._props['default'] = default - - if order is not None: - self._props['order'] = order - - if doc is not None: - self._props['doc'] = doc - - @property - def type(self): - """Returns: the schema of this field.""" - return self._type - - @property - def name(self): - """Returns: this field name.""" - return self._name - - @property - def index(self): - """Returns: the 0-based index of this field in the record.""" - return self._index - - @property - def default(self): - return self._props['default'] - - @property - def has_default(self): - return self._has_default - - @property - def order(self): - return self._props.get('order', None) - - @property - def doc(self): - return self._props.get('doc', None) - - @property - def props(self): - return self._props - - @property - def other_props(self): - return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) - - def __str__(self): - return json.dumps(self.to_json()) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['type'] = self.type.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Primitive Types - - -class PrimitiveSchema(Schema): - """Schema of a primitive Avro type. - - Valid primitive types are defined in PRIMITIVE_TYPES. - """ - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object for the specified primitive type. - - Args: - data_type: Type of the schema to construct. Must be primitive. - """ - if data_type not in PRIMITIVE_TYPES: - raise AvroException('%r is not a valid primitive type.' % data_type) - super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) - - @property - def name(self): - """Returns: the simple name of this schema.""" - # The name of a primitive type is the type itself. - return self.type - - @property - def fullname(self): - """Returns: the fully qualified name of this schema.""" - # The full name is the simple name for primitive schema. 
- return self.name - - def to_json(self, names=None): - if len(self.props) == 1: - return self.fullname - return self.props - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (non-recursive) - - -class FixedSchema(NamedSchema): - def __init__( - self, - name, - namespace, - size, - names=None, - other_props=None, - ): - # Ensure valid ctor args - if not isinstance(size, int): - fail_msg = 'Fixed Schema requires a valid integer for size property.' - raise AvroException(fail_msg) - - super(FixedSchema, self).__init__( - data_type=FIXED, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - self._props['size'] = size - - @property - def size(self): - """Returns: the size of this fixed schema, in bytes.""" - return self._props['size'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ - - -class EnumSchema(NamedSchema): - def __init__( - self, - name, - namespace, - symbols, - names=None, - doc=None, - other_props=None, - ): - """Initializes a new enumeration schema object. - - Args: - name: Simple name of this enumeration. - namespace: Optional namespace. - symbols: Ordered list of symbols defined in this enumeration. - names: - doc: - other_props: - """ - symbols = tuple(symbols) - symbol_set = frozenset(symbols) - if (len(symbol_set) != len(symbols) - or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): - raise AvroException( - 'Invalid symbols for enum schema: %r.' % (symbols,)) - - super(EnumSchema, self).__init__( - data_type=ENUM, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - - self._props['symbols'] = symbols - if doc is not None: - self._props['doc'] = doc - - @property - def symbols(self): - """Returns: the symbols defined in this enum.""" - return self._props['symbols'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (recursive) - - -class ArraySchema(Schema): - """Schema of an array.""" - - def __init__(self, items, other_props=None): - """Initializes a new array schema object. - - Args: - items: Avro schema of the array items. 
- other_props: - """ - super(ArraySchema, self).__init__( - data_type=ARRAY, - other_props=other_props, - ) - self._items_schema = items - self._props['items'] = items - - @property - def items(self): - """Returns: the schema of the items in this array.""" - return self._items_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - item_schema = self.items - to_dump['items'] = item_schema.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class MapSchema(Schema): - """Schema of a map.""" - - def __init__(self, values, other_props=None): - """Initializes a new map schema object. - - Args: - values: Avro schema of the map values. - other_props: - """ - super(MapSchema, self).__init__( - data_type=MAP, - other_props=other_props, - ) - self._values_schema = values - self._props['values'] = values - - @property - def values(self): - """Returns: the schema of the values in this map.""" - return self._values_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['values'] = self.values.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class UnionSchema(Schema): - """Schema of a union.""" - - def __init__(self, schemas): - """Initializes a new union schema object. - - Args: - schemas: Ordered collection of schema branches in the union. - """ - super(UnionSchema, self).__init__(data_type=UNION) - self._schemas = tuple(schemas) - - # Validate the schema branches: - - # All named schema names are unique: - named_branches = tuple( - filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) - unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) - if len(unique_names) != len(named_branches): - raise AvroException( - 'Invalid union branches with duplicate schema name:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - # Types are unique within unnamed schemas, and union is not allowed: - unnamed_branches = tuple( - filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) - unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) - if UNION in unique_types: - raise AvroException( - 'Invalid union branches contain other unions:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - if len(unique_types) != len(unnamed_branches): - raise AvroException( - 'Invalid union branches with duplicate type:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - @property - def schemas(self): - """Returns: the ordered list of schema branches in the union.""" - return self._schemas - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - to_dump.append(schema.to_json(names)) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class ErrorUnionSchema(UnionSchema): - """Schema representing the declared errors of a protocol message.""" - - def __init__(self, schemas): - """Initializes an error-union 
schema. - - Args: - schema: collection of error schema. - """ - # Prepend "string" to handle system errors - schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas) - super(ErrorUnionSchema, self).__init__(schemas=schemas) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - # Don't print the system error schema - if schema.type == STRING: - continue - to_dump.append(schema.to_json(names)) - return to_dump - - -# ------------------------------------------------------------------------------ - - -class RecordSchema(NamedSchema): - """Schema of a record.""" - - @staticmethod - def _make_field(index, field_desc, names): - """Builds field schemas from a list of field JSON descriptors. - - Args: - index: 0-based index of the field in the record. - field_desc: JSON descriptors of a record field. - Return: - The field schema. - """ - field_schema = schema_from_json_data( - json_data=field_desc['type'], - names=names, - ) - other_props = ( - dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS))) - return Field( - data_type=field_schema, - name=field_desc['name'], - index=index, - has_default=('default' in field_desc), - default=field_desc.get('default', _NO_DEFAULT), - order=field_desc.get('order', None), - doc=field_desc.get('doc', None), - other_props=other_props, - ) - - @staticmethod - def make_field_list(field_desc_list, names): - """Builds field schemas from a list of field JSON descriptors. - - Guarantees field name unicity. - - Args: - field_desc_list: collection of field JSON descriptors. - names: Avro schema tracker. - Yields - Field schemas. - """ - for index, field_desc in enumerate(field_desc_list): - yield RecordSchema._make_field(index, field_desc, names) - - @staticmethod - def _make_field_map(fields): - """Builds the field map. - - Guarantees field name unicity. - - Args: - fields: iterable of field schema. - Returns: - A map of field schemas, indexed by name. - """ - field_map = {} - for field in fields: - if field.name in field_map: - raise SchemaParseException( - 'Duplicate record field name %r.' % field.name) - field_map[field.name] = field - return field_map - - def __init__( - self, - name, - namespace, - fields=None, - make_fields=None, - names=None, - record_type=RECORD, - doc=None, - other_props=None - ): - """Initializes a new record schema object. - - Args: - name: Name of the record (absolute or relative). - namespace: Optional namespace the record belongs to, if name is relative. - fields: collection of fields to add to this record. - Exactly one of fields or make_fields must be specified. - make_fields: function creating the fields that belong to the record. - The function signature is: make_fields(names) -> ordered field list. - Exactly one of fields or make_fields must be specified. - names: - record_type: Type of the record: one of RECORD, ERROR or REQUEST. - Protocol requests are not named. - doc: - other_props: - """ - if record_type == REQUEST: - # Protocol requests are not named: - super(RecordSchema, self).__init__( - data_type=REQUEST, - other_props=other_props, - ) - elif record_type in [RECORD, ERROR]: - # Register this record name in the tracker: - super(RecordSchema, self).__init__( - data_type=record_type, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - else: - raise SchemaParseException( - 'Invalid record type: %r.' 
% record_type) - - if record_type in [RECORD, ERROR]: - avro_name = names.get_name(name=name, namespace=namespace) - nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) - elif record_type == REQUEST: - # Protocol request has no name: no need to change default namespace: - nested_names = names - - if fields is None: - fields = make_fields(names=nested_names) - else: - assert make_fields is None - self._fields = tuple(fields) - - self._field_map = RecordSchema._make_field_map(self._fields) - - self._props['fields'] = fields - if doc is not None: - self._props['doc'] = doc - - @property - def fields(self): - """Returns: the field schemas, as an ordered tuple.""" - return self._fields - - @property - def field_map(self): - """Returns: a read-only map of the field schemas index by field names.""" - return self._field_map - - def to_json(self, names=None): - if names is None: - names = Names() - # Request records don't have names - if self.type == REQUEST: - return [f.to_json(names) for f in self.fields] - - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - - to_dump = names.prune_namespace(self.props.copy()) - to_dump['fields'] = [f.to_json(names) for f in self.fields] - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Module functions - - -def filter_keys_out(items, keys): - """Filters a collection of (key, value) items. - - Exclude any item whose key belongs to keys. - - Args: - items: Dictionary of items to filter the keys out of. - keys: Keys to filter out. - Yields: - Filtered items. - """ - for key, value in items.items(): - if key in keys: - continue - yield key, value - - -# ------------------------------------------------------------------------------ - - -def _schema_from_json_string(json_string, names): - if json_string in PRIMITIVE_TYPES: - return PrimitiveSchema(data_type=json_string) - - # Look for a known named schema: - schema = names.get_schema(name=json_string) - if schema is None: - raise SchemaParseException( - 'Unknown named schema %r, known names: %r.' 
- % (json_string, sorted(names.names))) - return schema - - -def _schema_from_json_array(json_array, names): - def MakeSchema(desc): - return schema_from_json_data(json_data=desc, names=names) - - return UnionSchema(map(MakeSchema, json_array)) - - -def _schema_from_json_object(json_object, names): - data_type = json_object.get('type') - if data_type is None: - raise SchemaParseException( - 'Avro schema JSON descriptor has no "type" property: %r' % json_object) - - other_props = dict( - filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) - - if data_type in PRIMITIVE_TYPES: - # FIXME should not ignore other properties - result = PrimitiveSchema(data_type, other_props=other_props) - - elif data_type in NAMED_TYPES: - name = json_object.get('name') - namespace = json_object.get('namespace', names.default_namespace) - if data_type == FIXED: - size = json_object.get('size') - result = FixedSchema(name, namespace, size, names, other_props) - elif data_type == ENUM: - symbols = json_object.get('symbols') - doc = json_object.get('doc') - result = EnumSchema(name, namespace, symbols, names, doc, other_props) - - elif data_type in [RECORD, ERROR]: - field_desc_list = json_object.get('fields', ()) - - def MakeFields(names): - return tuple(RecordSchema.make_field_list(field_desc_list, names)) - - result = RecordSchema( - name=name, - namespace=namespace, - make_fields=MakeFields, - names=names, - record_type=data_type, - doc=json_object.get('doc'), - other_props=other_props, - ) - else: - raise Exception('Internal error: unknown type %r.' % data_type) - - elif data_type in VALID_TYPES: - # Unnamed, non-primitive Avro type: - - if data_type == ARRAY: - items_desc = json_object.get('items') - if items_desc is None: - raise SchemaParseException( - 'Invalid array schema descriptor with no "items" : %r.' - % json_object) - result = ArraySchema( - items=schema_from_json_data(items_desc, names), - other_props=other_props, - ) - - elif data_type == MAP: - values_desc = json_object.get('values') - if values_desc is None: - raise SchemaParseException( - 'Invalid map schema descriptor with no "values" : %r.' - % json_object) - result = MapSchema( - values=schema_from_json_data(values_desc, names=names), - other_props=other_props, - ) - - elif data_type == ERROR_UNION: - error_desc_list = json_object.get('declared_errors') - assert error_desc_list is not None - error_schemas = map( - lambda desc: schema_from_json_data(desc, names=names), - error_desc_list) - result = ErrorUnionSchema(schemas=error_schemas) - - else: - raise Exception('Internal error: unknown type %r.' % data_type) - else: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r' % json_object) - return result - - -# Parsers for the JSON data types: -_JSONDataParserTypeMap = { - _str: _schema_from_json_string, - list: _schema_from_json_array, - dict: _schema_from_json_object, -} - - -def schema_from_json_data(json_data, names=None): - """Builds an Avro Schema from its JSON descriptor. - - Args: - json_data: JSON data representing the descriptor of the Avro schema. - names: Optional tracker for Avro named schemas. - Returns: - The Avro schema parsed from the JSON descriptor. - Raises: - SchemaParseException: if the descriptor is invalid. - """ - if names is None: - names = Names() - - # Select the appropriate parser based on the JSON data type: - parser = _JSONDataParserTypeMap.get(type(json_data)) - if parser is None: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) - return parser(json_data, names=names) - - -# ------------------------------------------------------------------------------ - - -def parse(json_string): - """Constructs a Schema from its JSON descriptor in text form. - - Args: - json_string: String representation of the JSON descriptor of the schema. - Returns: - The parsed schema. - Raises: - SchemaParseException: on JSON parsing error, - or if the JSON descriptor is invalid. - """ - try: - json_data = json.loads(json_string) - except Exception as exn: - raise SchemaParseException( - 'Error parsing schema from JSON: %r. ' - 'Error message: %r.' - % (json_string, exn)) - - # Initialize the names object - names = Names() - - # construct the Avro Schema object - return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/base_client.py deleted file mode 100644 index 801023d..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/base_client.py +++ /dev/null @@ -1,449 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, - Optional, - Any, - Iterable, - Dict, - List, - Type, - Tuple, - TYPE_CHECKING, -) -import logging - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/?comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} - except KeyError: - credential = conn_settings.get("SharedAccessSignature") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DefaultEndpointsProtocol"], - conn_settings["AccountName"], - service, - conn_settings["EndpointSuffix"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["AccountName"], service, 
conn_settings["EndpointSuffix"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/base_client_async.py deleted file mode 100644 index 6ce19c7..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/base_client_async.py +++ /dev/null @@ -1,189 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/?comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('sas', None), - kwargs.pop('timeout', None) - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/constants.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/constants.py deleted file mode 100644 index bdee829..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys -from .._generated import AzureBlobStorage - - -X_MS_VERSION = AzureBlobStorage(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds - # the 80000 seconds was calculated with: - # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 80000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/encryption.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. 
- ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/models.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/models.py deleted file mode 100644 index c51356b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/models.py +++ /dev/null @@ -1,466 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
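The client-side encryption helpers removed above expect the caller to supply a key-encryption-key (KEK) object exposing wrap_key(key), unwrap_key(key, algorithm), get_kid() and get_key_wrap_algorithm(), as their docstrings and _validate_key_encryption_key_wrap describe. A minimal sketch of such an object is shown here for orientation only; the class name and its deliberately trivial base64 "wrapping" are illustrative assumptions, not anything the removed module shipped, and are unsuitable for real use.

    import base64

    class LocalTestKEK:
        """Hypothetical key-encryption-key for local experiments only (insecure)."""

        def __init__(self, kid="local-kek-1"):
            self._kid = kid

        def wrap_key(self, key):
            # A production KEK (e.g. one backed by a key vault) would use RSA-OAEP or
            # AES key wrap; this stand-in merely base64-encodes the content encryption key.
            return base64.b64encode(key)

        def unwrap_key(self, wrapped_key, algorithm):
            # 'algorithm' is the value previously returned by get_key_wrap_algorithm().
            return base64.b64decode(wrapped_key)

        def get_kid(self):
            return self._kid

        def get_key_wrap_algorithm(self):
            return "none/illustrative"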
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.blob.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - - return parsed - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. 
- - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/parser.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/policies.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/policies.py deleted file mode 100644 index c9bc798..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
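The shared models removed above encode account-SAS options as compact permission strings (for example "rw" for read and write, "sc" for service and container, "bq" for blob and queue). The short sketch below replays that round trip using the class names exactly as they appear in the deleted models.py; it is shown purely for illustration, since the module itself is being removed.

    # Assumes ResourceTypes, AccountSasPermissions and Services from the removed models.py.
    perms = AccountSasPermissions.from_string("rwdl")
    assert str(perms) == "rwdl" and perms.read and perms.list

    rtypes = ResourceTypes.from_string("sc")
    assert rtypes.service and rtypes.container and not rtypes.object

    svcs = Services(blob=True, queue=True)
    assert str(svcs) == "bq"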
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
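The ExponentialRetry removed above documents its schedule as initial_backoff + increment_base**retry_count, i.e. 15 s, 18 s and 24 s for the first three retries with the default initial_backoff=15 and increment_base=3, before the ±random_jitter_range jitter is applied. The snippet below simply replays that formula as a sanity check; it is a standalone illustration, not code from the removed module.

    initial_backoff, increment_base = 15, 3
    for count in range(4):
        # count == 0 corresponds to the first retry; later retries add increment_base ** count.
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        print(count, backoff)  # -> 0 15, 1 18, 2 24, 3 42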
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/request_handlers.py deleted file mode 100644 index 4f15b65..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/request_handlers.py +++ /dev/null @@ -1,147 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/response_handlers.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/response_handlers.py deleted file mode 100644 index 4b591dd..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/response_handlers.py +++ /dev/null @@ -1,162 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): - # If storage_error is one of the two then it has already been processed and serialized to the specific exception. - if isinstance(storage_error, (PartialBatchErrorException, ClientAuthenticationError)): - raise storage_error - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except 
ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - error.raise_with_traceback() - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. 
The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/uploads.py deleted file mode 100644 index acdc16f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/uploads.py +++ /dev/null @@ -1,550 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
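Editor's note: the account-SAS signing in the removed ``_SharedAccessHelper`` above amounts to newline-joining a fixed list of query values and HMAC-signing the result. A minimal standalone sketch follows; it assumes ``sign_string`` behaves like the usual Azure Storage shared-key scheme (HMAC-SHA256 keyed with the base64-decoded account key, base64-encoded digest), and the key and field values are made up for illustration::

    import base64, hashlib, hmac
    from urllib.parse import quote

    def sign_string(account_key, string_to_sign):
        # Assumed equivalent of the package's _shared.sign_string helper.
        digest = hmac.new(base64.b64decode(account_key),
                          string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    # Field order mirrors add_account_signature: account name, permission,
    # services, resource types, start, expiry, ip, protocol, version.
    fields = ['myaccount', 'rl', 'b', 'sco', '', '2021-01-01T00:00:00Z',
              '', 'https', '2020-04-08']
    string_to_sign = '\n'.join(fields) + '\n'
    sas_signature = 'sig=' + quote(sign_string('bXlrZXk=', string_to_sign), safe='')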
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - 
self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, block_id, block_stream): - try: - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. 
- if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared/uploads_async.py deleted file mode 100644 index e598165..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared/uploads_async.py +++ /dev/null @@ -1,350 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
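Editor's note: the chunking arithmetic used by the removed ``_ChunkUploader.get_substream_blocks`` above (equal-sized blocks plus a possibly shorter final block, each tagged with a zero-padded block id) can be checked with a small standalone sketch; the function name here is illustrative and not part of the package::

    from math import ceil

    def plan_blocks(total_size, chunk_size):
        # Mirrors the removed get_substream_blocks: ceil(total/chunk) blocks,
        # each chunk_size bytes except a possibly shorter final block.
        blocks = int(ceil(total_size / float(chunk_size)))
        last = chunk_size if total_size % chunk_size == 0 else total_size % chunk_size
        return [('BlockId{:05d}'.format(i),               # block id
                 i * chunk_size,                           # offset into the stream
                 last if i == blocks - 1 else chunk_size)  # block length
                for i in range(blocks)]

    # A 10 MiB upload with 4 MiB chunks yields blocks of 4, 4 and 2 MiB.
    print(plan_blocks(10 * 1024 * 1024, 4 * 1024 * 1024))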
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, block_id, block_stream): - range_id = await self._upload_substream_block(block_id, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, block_id, block_stream): - try: - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, 
chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - chunk_end, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_04_08/_shared_access_signature.py deleted file mode 100644 index 370fe4e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_shared_access_signature.py +++ /dev/null @@ -1,596 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from ._shared import sign_string, url_quote -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ - QueryStringConstants - -if TYPE_CHECKING: - from datetime import datetime - from .import ( - ResourceTypes, - AccountSasPermissions, - UserDelegationKey, - ContainerSasPermissions, - BlobSasPermissions - ) - - -class BlobQueryStringConstants(object): - SIGNED_TIMESTAMP = 'snapshot' - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key=None, user_delegation_key=None): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: - Instead of an account key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling get_user_delegation_key on any Blob service object. - ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - self.user_delegation_key = user_delegation_key - - def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, - expiry=None, start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the blob or one of its snapshots. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to grant permission. - :param BlobSasPermissions permission: - The permissions associated with the shared access signature. 
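For reference, a minimal standalone sketch of two behaviours in the chunk uploaders deleted above: the block-ID scheme used by ``BlockBlobChunkUploader._upload_chunk`` and the all-zero-page check in ``PageBlobChunkUploader``. The helper names ``make_block_id`` and ``is_chunk_empty`` are illustrative only and are not part of the package's API.

.. code-block:: python

    import base64
    from urllib.parse import quote

    def make_block_id(chunk_offset: int) -> str:
        # Mirrors the scheme shown above: zero-pad the offset to 32 digits,
        # base64-encode it, URL-quote the result, then base64-encode again.
        index = '{0:032d}'.format(chunk_offset)
        inner = base64.b64encode(index.encode('utf-8')).decode('utf-8')
        return base64.b64encode(quote(inner).encode('utf-8')).decode('utf-8')

    def is_chunk_empty(chunk_data: bytes) -> bool:
        # PageBlobChunkUploader skips pages containing only zero bytes;
        # any() over the bytes is a compact equivalent of its loop.
        return not any(chunk_data)

    print(make_block_id(4 * 1024 * 1024))   # block id for the second 4MB chunk
    print(is_chunk_empty(b'\x00' * 512))    # True -> this page would be skipped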
The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - resource_path = container_name + '/' + blob_name - - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - - resource = 'bs' if snapshot else 'b' - resource = 'bv' if version_id else resource - resource = 'd' if kwargs.pop("is_directory", None) else resource - sas.add_resource(resource) - - sas.add_timestamp(snapshot or version_id) - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, resource_path, - user_delegation_key=self.user_delegation_key) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. 
- :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, container_name, - user_delegation_key=self.user_delegation_key) - return sas.get_token() - - -class _BlobSharedAccessHelper(_SharedAccessHelper): - - def add_timestamp(self, timestamp): - self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) - - def add_info_for_hns_account(self, **kwargs): - self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) - self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) - - def get_value_to_append(self, query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): - # pylint: disable = no-member - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/blob/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource) - - if user_delegation_key is not None: - self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) - self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) - self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) - self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) - self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) - self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_TID) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) - else: - string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + - self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + - self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + - self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key if user_delegation_key is None else user_delegation_key.value, - string_to_sign)) - - def get_token(self): - # a conscious decision was made to exclude the timestamp in the generated token - # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp - exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] - return '&'.join(['{0}={1}'.format(n, url_quote(v)) - for n, v in self.query_dict.items() if v is not None and n not in exclude]) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the blob service. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. 
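The signature that ``add_resource_signature`` appends is an HMAC-SHA256 over the string-to-sign, keyed with the base64-decoded account key (or the user delegation key value). A hedged sketch of an equivalent of the ``_shared.sign_string`` helper imported above, with a placeholder key and string-to-sign:

.. code-block:: python

    import base64
    import hashlib
    import hmac

    def sign_string(key: str, string_to_sign: str) -> str:
        # Assumed equivalent of the _shared.sign_string helper: base64-decode
        # the key, HMAC-SHA256 the UTF-8 string, base64-encode the digest.
        key_bytes = base64.b64decode(key)
        digest = hmac.new(key_bytes, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    # Hypothetical inputs; a real string-to-sign is assembled field by field as above.
    example_key = base64.b64encode(b'not-a-real-account-key').decode('utf-8')
    example_string = 'r\n\n2030-01-01T00:00:00Z\n/blob/myaccount/mycontainer/myblob\n'
    print(sign_string(example_key, example_string))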
- - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.blob.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_sas_token] - :end-before: [END create_sas_token] - :language: python - :dedent: 8 - :caption: Generating a shared access signature. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(blob=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_container_sas( - account_name, # type: str - container_name, # type: str - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[ContainerSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a container. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. 
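As a usage reference for the ``generate_account_sas`` helper removed above, a hedged sketch follows; the account name and key are placeholders, and the import path shown is the upstream ``azure.storage.blob`` package (the vendored copy deleted here exposed the same helper).

.. code-block:: python

    from datetime import datetime, timedelta
    from azure.storage.blob import generate_account_sas, ResourceTypes, AccountSasPermissions

    # Placeholder credentials; in practice these come from configuration.
    account_name = "myaccount"
    account_key = "<storage-account-key>"

    sas_token = generate_account_sas(
        account_name,
        account_key,
        resource_types=ResourceTypes(service=True, container=True, object=True),
        permission=AccountSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )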
- :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 12 - :caption: Generating a sas token. - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_container( - container_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_blob_sas( - account_name, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[BlobSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a blob. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str blob_name: - The name of the blob. - :param str snapshot: - An optional blob snapshot ID. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.BlobSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. 
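A similar hedged sketch for ``generate_container_sas`` (placeholders throughout, upstream import path assumed):

.. code-block:: python

    from datetime import datetime, timedelta
    from azure.storage.blob import generate_container_sas, ContainerSasPermissions

    container_sas = generate_container_sas(
        account_name="myaccount",
        container_name="mycontainer",
        account_key="<storage-account-key>",
        permission=ContainerSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=2),
    )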
- :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str version_id: - An optional blob version ID. This parameter is only for versioning enabled account - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - version_id = kwargs.pop('version_id', None) - if version_id and snapshot: - raise ValueError("snapshot and version_id cannot be set at the same time.") - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_blob( - container_name, - blob_name, - snapshot=snapshot, - version_id=version_id, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_04_08/_upload_helpers.py deleted file mode 100644 index 94313f6..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_upload_helpers.py +++ /dev/null @@ -1,295 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
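And for ``generate_blob_sas``, the last helper in the deleted ``_shared_access_signature.py``, a hedged usage sketch with placeholder account, container and blob names:

.. code-block:: python

    from datetime import datetime, timedelta
    from azure.storage.blob import generate_blob_sas, BlobSasPermissions

    sas = generate_blob_sas(
        account_name="myaccount",
        container_name="mycontainer",
        blob_name="report.csv",
        account_key="<storage-account-key>",   # or user_delegation_key=...
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(minutes=30),
    )
    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/report.csv?" + sas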
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceExistsError, ResourceModifiedError, HttpResponseError - -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from ._shared.models import StorageErrorCode -from ._shared.uploads import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from ._shared.encryption import generate_blob_encryption_data, encrypt_blob -from ._generated.models import ( - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -def _convert_mod_error(error): - message = error.message.replace( - "The condition specified using HTTP conditional header(s) is not met.", - "The specified blob already exists.") - message = message.replace("ConditionNotMet", "BlobAlreadyExists") - overwrite_error = ResourceExistsError( - message=message, - response=error.response, - error=error) - overwrite_error.error_code = StorageErrorCode.blob_already_exists - raise overwrite_error - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - # Do single put if the size is smaller than or equal config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return client.upload( - body=data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or 
encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs - ) - else: - block_ids = upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - headers=headers, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs) - - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/_version.py b/azure/multiapi/storagev2/blob/v2020_04_08/_version.py deleted file mode 100644 index 8d23bd9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/_version.py +++ /dev/null @@ -1,7 
+0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.7.1" diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/__init__.py deleted file mode 100644 index 33c1031..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os - -from .._models import BlobType -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._blob_client_async import BlobClient -from ._container_client_async import ContainerClient -from ._blob_service_client_async import BlobServiceClient -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -async def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
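The deleted ``upload_block_blob`` helper above switches between a single Put Blob call and a staged Put Block / Put Block List sequence based on ``max_single_put_size``. A rough sketch of that decision with illustrative names (not the package's API); the 64MB default is the one stated in the client docstrings below.

.. code-block:: python

    def choose_block_blob_strategy(length, max_single_put_size=64 * 1024 * 1024):
        # Mirrors the branch in upload_block_blob: payloads at or below the
        # threshold go up in one Put Blob request; larger or unknown-length
        # payloads are staged as blocks and then committed.
        if length is not None and length <= max_single_put_size:
            return "single_put"
        return "stage_blocks_then_commit"

    print(choose_block_blob_strategy(10 * 1024 * 1024))   # 'single_put'
    print(choose_block_blob_strategy(200 * 1024 * 1024))  # 'stage_blocks_then_commit'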
- :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -async def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = await client.download_blob(**kwargs) - await stream.readinto(handle) - - -async def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - await _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - await _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/_blob_client_async.py deleted file mode 100644 index 1e1ae55..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_blob_client_async.py +++ /dev/null @@ -1,2445 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method -from functools import partial -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.exceptions import ResourceNotFoundError, HttpResponseError - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import get_page_ranges_result, parse_tags, deserialize_pipeline_response_into_cls -from .._serialize import get_modify_conditions, get_api_version, get_access_conditions -from .._generated.aio import AzureBlobStorage -from .._generated.models import CpkInfo -from .._deserialize import deserialize_blob_properties -from .._blob_client import BlobClient as BlobClientBase -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from .._models import BlobType, BlobBlock, BlobProperties -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - - -class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. 
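A hedged async usage sketch of the two convenience helpers from the deleted ``aio/__init__.py``; the blob URL and SAS are placeholders, and the imports use the upstream ``azure.storage.blob.aio`` names (the vendored module exported the same functions).

.. code-block:: python

    import asyncio
    from azure.storage.blob.aio import upload_blob_to_url, download_blob_from_url

    BLOB_URL = "https://myaccount.blob.core.windows.net/mycontainer/notes.txt?<sas>"

    async def main():
        # Upload bytes as a block blob, overwriting any existing content.
        await upload_blob_to_url(BLOB_URL, b"hello from the async helpers", overwrite=True)
        # Download the same blob to a local file.
        await download_blob_from_url(BLOB_URL, "notes-copy.txt", overwrite=True)

    asyncio.run(main())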
- :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobClient, self).__init__( - account_url, - container_name=container_name, - blob_name=blob_name, - snapshot=snapshot, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_account_information(self, **kwargs): # type: ignore - # type: (Optional[int]) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Dict[str, Any] - """ - Creates a new Block Blob where the content of the blob is read from a given URL. - The content of an existing blob is overwritten with the new blob. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. - :keyword bool include_source_blob_properties: - Indicates if properties from the source blob should be copied. Defaults to True. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :paramtype tags: dict(str, str) - :keyword bytearray source_content_md5: - Specify the md5 that is used to verify the integrity of the source bytes. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. 
- """ - options = self._upload_blob_from_url_options( - source_url=self._encode_source_url(source_url), - **kwargs) - try: - return await self._client.block_blob.put_blob_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob( - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. - Required if the blob has an active lease. - :paramtype: ~azure.storage.blob.aio.BlobLeaseClient - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 16 - :caption: Upload a blob to the container. 
- """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return await upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return await upload_page_blob(**options) - return await upload_append_blob(**options) - - @distributed_trace_async - async def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 16 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - downloader = StorageStreamDownloader(**options) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_blob(self, delete_snapshots=False, **kwargs): - # type: (str, Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 16 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - await self._client.blob.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_blob(self, **kwargs): - # type: (Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 12 - :caption: Undeleting a blob. - """ - try: - await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :param str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :param int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - await self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def get_blob_properties(self, **kwargs): - # type: (Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 12 - :caption: Getting the properties for a blob. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - cls_method = kwargs.pop('cls', None) - if cls_method: - kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) - blob_props = await self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return await self._client.blob.set_http_headers(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.set_metadata(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return await self._client.page_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.append_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 12 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.create_snapshot(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, Any) -> Any - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. 
- - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Copy a blob from a URL. 
- """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return await self._client.page_blob.copy_incremental(**options) - return await self._client.blob.start_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - await self._client.blob.abort_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. 
- :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=standard_blob_tier, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return await self._client.block_blob.stage_block(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - options = self._stage_block_from_url_options( - block_id, - source_url=self._encode_source_url(source_url), - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return await self._client.block_blob.stage_block_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = await self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - @distributed_trace_async - async def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of BlobBlock objects. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.block_blob.commit_block_list(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. 
The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return await self._client.blob.set_tags(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = await self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = await self._client.page_blob.get_page_ranges(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def set_sequence_number( # type: ignore - self, sequence_number_action, # type: Union[str, SequenceNumberAction] - sequence_number=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return await self._client.page_blob.update_sequence_number(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_blob(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return await self._client.page_blob.resize(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. 
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
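A short, hedged sketch of the page-blob operations from the docstrings above (upload_page, resize_blob, get_page_ranges); create_page_blob comes from the same client and is documented elsewhere in this module. The 512-byte alignment rules described above apply, and the connection string, container and blob names are placeholders.

.. code-block:: python

    import asyncio

    from azure.storage.blob.aio import BlobClient


    async def write_pages(conn_str):
        # Placeholder container/blob names.
        blob = BlobClient.from_connection_string(
            conn_str, container_name="disks", blob_name="sample-page-blob")
        async with blob:
            # Page blobs have a fixed size that must be 512-byte aligned.
            await blob.create_page_blob(size=1024)
            await blob.upload_page(b"\x01" * 512, offset=0, length=512)
            await blob.resize_blob(2048)
            filled, cleared = await blob.get_page_ranges()
            print("filled:", filled, "cleared:", cleared)

    # asyncio.run(write_pages("<connection-string>"))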
- :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return await self._client.page_blob.upload_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def clear_page(self, offset, length, **kwargs): - # type: (int, int, Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. 
- :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return await self._client.page_blob.clear_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. 
If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return await self._client.append_blob.append_block(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async() - async def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. 
- :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return await self._client.append_blob.append_block_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async() - async def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
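A hedged sketch of the append-blob flow covered by the docstrings above (append_block followed by seal_append_blob); create_append_blob comes from the same client and is documented elsewhere in this module, and all names are placeholders.

.. code-block:: python

    import asyncio

    from azure.storage.blob.aio import BlobClient


    async def append_log_lines(conn_str, lines):
        # Placeholder container/blob names.
        blob = BlobClient.from_connection_string(
            conn_str, container_name="logs", blob_name="app.log")
        async with blob:
            await blob.create_append_blob()
            for line in lines:
                # Each call adds a new block at the end of the blob.
                await blob.append_block(line, length=len(line))
            # Sealing makes the append blob read-only (service version 2019-12-12+).
            await blob.seal_append_blob()

    # asyncio.run(append_log_lines("<connection-string>", [b"started\n", b"done\n"]))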
- :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return await self._client.append_blob.seal(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/_blob_service_client_async.py deleted file mode 100644 index 4e91743..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_blob_service_client_async.py +++ /dev/null @@ -1,647 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged - -from .._shared.models import LocationMode -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._shared.parser import _to_utc_datetime -from .._shared.response_handlers import parse_to_internal_user_delegation_key -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageServiceProperties, KeyInfo -from .._blob_service_client import BlobServiceClient as BlobServiceClientBase -from ._container_client_async import ContainerClient -from ._blob_client_async import BlobClient -from .._models import ContainerProperties -from .._deserialize import service_stats_deserialize, service_properties_deserialize -from .._serialize import get_api_version -from ._models import ContainerPropertiesPaged, FilteredBlobPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey - from ._lease_async import BlobLeaseClient - from .._models import ( - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. 
The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. 
- :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 12 - :caption: Getting account information for the blob service. - """ - try: - return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_stats(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 12 - :caption: Getting service stats for the blob service. 
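A brief sketch of the account-level read operations documented above (get_account_information and get_service_stats); the connection string is a placeholder, and get_service_stats only returns data when read-access geo-redundant replication is enabled on the account.

.. code-block:: python

    import asyncio

    from azure.storage.blob.aio import BlobServiceClient


    async def show_account_details(conn_str):
        service = BlobServiceClient.from_connection_string(conn_str)
        async with service:
            info = await service.get_account_information()
            print("sku:", info["sku_name"], "kind:", info["account_kind"])
            # Requires RA-GRS replication to be enabled.
            stats = await service.get_service_stats()
            print("geo-replication:", stats["geo_replication"]["status"])

    # asyncio.run(show_account_details("<connection-string>"))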
- """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 12 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 12 - :caption: Setting service properties for the blob service. - """ - if all(parameter is None for parameter in [ - analytics_logging, hour_metrics, minute_metrics, cors, - target_version, delete_retention_policy, static_website]): - raise ValueError("set_service_properties should be called with at least one parameter") - - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> AsyncItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 16 - :caption: Listing the containers in the blob service. - """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. 
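
The service-properties and container-listing operations documented above can be exercised together. The following is a minimal, hedged sketch, not part of the removed module: it assumes the equivalent public ``azure.storage.blob.aio`` package (or one of the retained multiapi versions such as v2019_07_07), and the account URL and credential are placeholders::

    import asyncio
    from azure.storage.blob import RetentionPolicy
    from azure.storage.blob.aio import BlobServiceClient

    async def main():
        # Placeholder endpoint and credential; substitute real values.
        service = BlobServiceClient(
            account_url="https://<account>.blob.core.windows.net",
            credential="<account-key-or-sas>")
        async with service:
            # Only delete retention is changed; unspecified settings are preserved.
            await service.set_service_properties(
                delete_retention_policy=RetentionPolicy(enabled=True, days=7))
            # Lazily page through containers whose names start with "app".
            async for container in service.list_containers(
                    name_starts_with="app", include_metadata=True):
                print(container.name, container.metadata)

    asyncio.run(main())
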
- - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace_async - async def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 16 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - timeout = kwargs.pop('timeout', None) - kwargs.setdefault('merge_span', True) - await container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace_async - async def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. 
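
As an illustration of the container-creation and tag-filtering calls above, here is a hedged sketch; the container name ``demo`` and the ``project`` tag are invented for the example, and the import path assumes the public ``azure.storage.blob.aio`` equivalent::

    import asyncio
    from azure.storage.blob.aio import BlobServiceClient

    async def main():
        service = BlobServiceClient(
            account_url="https://<account>.blob.core.windows.net",
            credential="<account-key-or-sas>")
        async with service:
            # Create a container; raises ResourceExistsError if it already exists.
            await service.create_container("demo", metadata={"Category": "test"})
            # Find blobs across containers whose tags match the expression.
            async for blob in service.find_blobs_by_tags("\"project\"='demo'"):
                print(blob.name)

    asyncio.run(main())
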
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 16 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword str new_name: - The new name for the deleted container to be restored to. - If not specified deleted_container_name will be used as the restored container name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - container = self.get_container_client(new_name or deleted_container_name) - try: - await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except HttpResponseError as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. 
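
A hypothetical soft-delete round trip built from the delete/undelete operations above; it assumes container soft delete is enabled on the account and that the deleted container's version is read back via ``list_containers(include_deleted=True)``::

    import asyncio
    from azure.storage.blob.aio import BlobServiceClient

    async def main():
        service = BlobServiceClient(
            account_url="https://<account>.blob.core.windows.net",
            credential="<account-key-or-sas>")
        async with service:
            await service.delete_container("demo")
            # Soft-deleted containers surface with include_deleted=True.
            async for item in service.list_containers(include_deleted=True):
                if item.deleted and item.name == "demo":
                    restored = await service.undelete_container(item.name, item.version)
                    print("restored:", restored.container_name)

    asyncio.run(main())
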
- :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 12 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by - :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 16 - :caption: Getting the blob client to interact with a specific blob. 
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/_container_client_async.py deleted file mode 100644 index e26fe23..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_container_client_async.py +++ /dev/null @@ -1,1122 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.pipeline.transport import AsyncHttpResponse - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from .._generated.aio import AzureBlobStorage -from .._generated.models import SignedIdentifier -from .._deserialize import deserialize_container_properties -from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name -from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import -from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix -from ._lease_async import BlobLeaseClient -from ._blob_client_async import BlobClient - -if TYPE_CHECKING: - from .._models import PublicAccess - from ._download_async import StorageStreamDownloader - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - AccessPolicy, - StandardBlobTier, - PremiumPageBlobTier) - - -class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): - """A client to interact with a specific container, although that container - may not yet exist. 
- - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 12 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(ContainerClient, self).__init__( - account_url, - container_name=container_name, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 16 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - timeout = kwargs.pop('timeout', None) - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return await self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
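
The ContainerClient constructor and the create/delete operations above can also be used directly, without going through BlobServiceClient. A minimal sketch with placeholder endpoint, credential, and container name::

    import asyncio
    from azure.core.exceptions import ResourceExistsError
    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient(
            account_url="https://<account>.blob.core.windows.net",
            container_name="demo",
            credential="<account-key-or-sas>")
        async with container:
            try:
                await container.create_container(metadata={"Category": "test"})
            except ResourceExistsError:
                pass  # create fails if the container already exists
            await container.delete_container()

    asyncio.run(main())
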
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 16 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - await self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the container. 
- """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_properties(self, **kwargs): - # type: (**Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace_async - async def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 16 - :caption: Getting the access policy on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs # type: Any - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 16 - :caption: Setting access policy on the container. - """ - timeout = kwargs.pop('timeout', None) - lease = kwargs.pop('lease', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - try: - return await self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 12 - :caption: List the blobs in the container. 
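
The access-policy setter above accepts a dict of up to five stored access policies. A minimal sketch (the policy id ``read-only`` is invented for the example, and the client setup mirrors the earlier sketches)::

    import asyncio
    from datetime import datetime, timedelta, timezone
    from azure.storage.blob import AccessPolicy, ContainerSasPermissions
    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient(
            account_url="https://<account>.blob.core.windows.net",
            container_name="demo",
            credential="<account-key-or-sas>")
        async with container:
            policy = AccessPolicy(
                permission=ContainerSasPermissions(read=True, list=True),
                start=datetime.now(timezone.utc),
                expiry=datetime.now(timezone.utc) + timedelta(hours=1))
            await container.set_container_access_policy(
                signed_identifiers={"read-only": policy})
            acl = await container.get_container_access_policy()
            print(acl["public_access"], acl["signed_identifiers"])

    asyncio.run(main())
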
- """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged - ) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. - :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace_async - async def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. 
The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. 
If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 12 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - await blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace_async - async def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a Lease object - or the lease ID as a string. 
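
Tying together the upload, download, and delete operations described around this point, a minimal sketch with an invented blob name::

    import asyncio
    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient(
            account_url="https://<account>.blob.core.windows.net",
            container_name="demo",
            credential="<account-key-or-sas>")
        async with container:
            # Upload, replacing any existing blob of the same name.
            await container.upload_blob(
                name="hello.txt", data=b"hello world", overwrite=True)
            # Download through the container client and read the full content.
            downloader = await container.download_blob("hello.txt")
            print(await downloader.readall())
            await container.delete_blob("hello.txt")

    asyncio.run(main())
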
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await blob.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object. (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return await blob_client.download_blob( - offset=offset, - length=length, - **kwargs) - - @distributed_trace_async - async def delete_blobs( # pylint: disable=arguments-differ - self, *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 12 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_standard_blob_tier_blobs( - self, - standard_blob_tier: Union[str, 'StandardBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. 
The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set on all blobs to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[BlobProperties, str] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 12 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/_download_async.py deleted file mode 100644 index 44ba51d..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_download_async.py +++ /dev/null @@ -1,502 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
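The batch helpers documented above (``delete_blobs``, ``set_standard_blob_tier_blobs`` and ``set_premium_page_blob_tier_blobs``) each send one batch request and return an async iterator with one sub-response per blob. The following is an illustrative sketch only: the ``v2021_08_06`` module path is inferred from the API versions this release retains, and the account URL, credential, container and blob names are placeholders, not values from this repository.

.. code-block:: python

    # Illustrative sketch only. The module path below is assumed from the
    # retained API versions; the URL, credential and names are placeholders.
    import asyncio
    from azure.multiapi.storagev2.blob.v2021_08_06.aio import ContainerClient

    async def batch_cleanup():
        container = ContainerClient(
            account_url="https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            credential="<account-key-or-sas-token>",
        )
        async with container:
            # delete_blobs() accepts names, BlobProperties or dicts and returns
            # one sub-response per blob, in the order the blobs were passed.
            responses = await container.delete_blobs(
                "blob1", "blob2", delete_snapshots="include")
            async for sub_response in responses:
                print(sub_response.status_code)

            # Apply one standard tier to several block blobs in a single batch;
            # pass None as the tier to honour the tier on each BlobProperties.
            await container.set_standard_blob_tier_blobs("Cool", "blob3", "blob4")

    asyncio.run(batch_cleanup())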
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from azure.core.exceptions import HttpResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._deserialize import get_page_ranges_result -from .._download import process_range_and_offset, _ChunkDownloader - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = response.properties.etag - - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - try: - chunk = next(self._iter_chunks) - except StopIteration: - raise StopAsyncIteration("Download complete") - self._current_content = await self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the blob. 
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = await self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overriden by the user by specifying '*' - if self._request_options.get('modified_access_conditions'): - if not self._request_options['modified_access_conditions'].if_match: - self._request_options['modified_access_conditions'].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - """Iterate over chunks in the download stream. - - :rtype: Iterable[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - async def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. 
- :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :param int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - for task in done: - task.result() - except HttpResponseError as error: - process_storage_error(error) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - done, _running_futures = await asyncio.wait(running_futures) - try: - for task in done: - task.result() - except HttpResponseError as error: - process_storage_error(error) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. 
The stream must be seekable if the download - uses more than one parallel connection. - :param int max_concurrency: - The number of parallel connections with which to download. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/_lease_async.py deleted file mode 100644 index 79e6733..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_lease_async.py +++ /dev/null @@ -1,325 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._serialize import get_modify_conditions -from .._lease import BlobLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - from .._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(LeaseClientBase): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.aio.BlobClient or - ~azure.storage.blob.aio.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). 
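The removed ``_download_async.py`` above implements the async ``StorageStreamDownloader``: ``readall()`` buffers the whole blob, ``readinto()`` writes to a caller-supplied stream, ``chunks()`` iterates the data incrementally, and ``content_as_bytes``/``content_as_text``/``download_to_stream`` survive only as deprecated aliases. A hedged sketch of how that surface is typically driven; the module path, connection string and names are assumptions for illustration.

.. code-block:: python

    # Illustrative sketch only; the connection string and names are placeholders.
    import asyncio
    from azure.multiapi.storagev2.blob.v2021_08_06.aio import BlobClient

    async def stream_download():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer", blob_name="report.csv")
        async with blob:
            # Buffer the entire blob in memory, using up to four parallel ranges.
            downloader = await blob.download_blob(max_concurrency=4)
            data = await downloader.readall()
            print(len(data))

            # Or walk the blob chunk by chunk without holding it all in memory.
            downloader = await blob.download_blob()
            total = 0
            async for chunk in downloader.chunks():
                total += len(chunk)  # handle each chunk incrementally here
            print(total)

    asyncio.run(stream_download())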
- :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/_list_blobs_helper.py deleted file mode 100644 index 058572f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_list_blobs_helper.py +++ /dev/null @@ -1,163 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from .._deserialize import get_blob_properties_from_generated_code -from .._models import BlobProperties -from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The container that the blobs are listed from. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
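The lease docstrings above describe the full lifecycle: ``acquire`` (finite or infinite), ``renew``, ``change``, ``release`` and ``break_lease``. A minimal sketch of the common acquire/renew/release path, under the same module-path and naming assumptions as the earlier examples:

.. code-block:: python

    # Illustrative sketch only; the connection string and names are placeholders.
    import asyncio
    from azure.multiapi.storagev2.blob.v2021_08_06.aio import BlobClient

    async def lease_demo():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer", blob_name="data.bin")
        async with blob:
            # A finite lease lasts 15-60 seconds; -1 requests an infinite lease.
            lease = await blob.acquire_lease(lease_duration=15)
            try:
                await lease.renew()    # resets the 15-second duration clock
            finally:
                await lease.release()  # other clients may acquire immediately

    asyncio.run(lease_demo())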
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefix(AsyncItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token of the current page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - async def _extract_data_cb(self, get_next_return): - continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_models.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/_models.py deleted file mode 100644 index 05edd78..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_models.py +++ /dev/null @@ -1,143 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator -from azure.core.exceptions import HttpResponseError -from .._deserialize import parse_tags - -from .._models import ContainerProperties, FilteredBlob -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - -from .._generated.models import FilterBlobItem - - -class ContainerPropertiesPaged(AsyncPageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class FilteredBlobPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - tags = parse_tags(item.tags) - blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_04_08/aio/_upload_helpers.py deleted file mode 100644 index 36d1e44..0000000 --- a/azure/multiapi/storagev2/blob/v2020_04_08/aio/_upload_helpers.py +++ /dev/null @@ -1,270 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceModifiedError, HttpResponseError - -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from .._shared.uploads_async import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from .._shared.encryption import generate_blob_encryption_data, encrypt_blob -from .._generated.models import ( - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) -from .._upload_helpers import _convert_mod_error, _any_conditions - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - - -async def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return await client.upload( - body=data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = await upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs - ) - else: - block_ids = await upload_substream_blocks( - 
service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - headers=headers, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return await client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. " - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = await client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return await upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs) - - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - 
chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/__init__.py deleted file mode 100644 index 9164961..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/__init__.py +++ /dev/null @@ -1,233 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import os - -from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import -from ._version import VERSION -from ._blob_client import BlobClient -from ._container_client import ContainerClient -from ._blob_service_client import BlobServiceClient -from ._lease import BlobLeaseClient -from ._download import StorageStreamDownloader -from ._quick_query_helper import BlobQueryReader -from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.response_handlers import PartialBatchErrorException -from ._shared.models import( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode, - UserDelegationKey -) -from ._generated.models import ( - RehydratePriority -) -from ._models import ( - BlobType, - BlockState, - StandardBlobTier, - PremiumPageBlobTier, - SequenceNumberAction, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - ContainerProperties, - BlobProperties, - FilteredBlob, - LeaseProperties, - ContentSettings, - CopyProperties, - BlobBlock, - PageRange, - AccessPolicy, - ContainerSasPermissions, - BlobSasPermissions, - CustomerProvidedEncryptionKey, - ContainerEncryptionScope, - BlobQueryError, - DelimitedJsonDialect, - DelimitedTextDialect, - ArrowDialect, - ArrowType, - ObjectReplicationPolicy, - ObjectReplicationRule -) -from ._list_blobs_helper import BlobPrefix - -__version__ = VERSION - - -def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> Dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. 
This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = client.download_blob(**kwargs) - stream.readinto(handle) - - -def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream. - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. 
The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobType', - 'BlobLeaseClient', - 'StorageErrorCode', - 'UserDelegationKey', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'BlockState', - 'StandardBlobTier', - 'PremiumPageBlobTier', - 'SequenceNumberAction', - 'PublicAccess', - 'BlobAnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'ContainerProperties', - 'BlobProperties', - 'BlobPrefix', - 'FilteredBlob', - 'LeaseProperties', - 'ContentSettings', - 'CopyProperties', - 'BlobBlock', - 'PageRange', - 'AccessPolicy', - 'ContainerSasPermissions', - 'BlobSasPermissions', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageStreamDownloader', - 'CustomerProvidedEncryptionKey', - 'RehydratePriority', - 'generate_account_sas', - 'generate_container_sas', - 'generate_blob_sas', - 'PartialBatchErrorException', - 'ContainerEncryptionScope', - 'BlobQueryError', - 'DelimitedJsonDialect', - 'DelimitedTextDialect', - 'ArrowDialect', - 'ArrowType', - 'BlobQueryReader', - 'ObjectReplicationPolicy', - 'ObjectReplicationRule' -] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_blob_client.py b/azure/multiapi/storagev2/blob/v2020_06_12/_blob_client.py deleted file mode 100644 index e82c04f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_blob_client.py +++ /dev/null @@ -1,3788 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines,no-self-use -from functools import partial -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError - -from ._shared import encode_base64 -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query, TransportWrapper -from ._shared.encryption import generate_blob_encryption_data -from ._shared.uploads import IterStreamer -from ._shared.request_handlers import ( - add_metadata_headers, get_length, read_length, - validate_and_format_range_headers) -from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized -from ._generated import AzureBlobStorage -from ._generated.models import ( # pylint: disable=unused-import - DeleteSnapshotsOptionType, - BlobHTTPHeaders, - BlockLookupList, - AppendPositionAccessConditions, - SequenceNumberAccessConditions, - QueryRequest, - CpkInfo) -from ._serialize import ( - get_modify_conditions, - get_source_conditions, - get_cpk_scope_info, - get_api_version, - serialize_blob_tags_header, - serialize_blob_tags, - serialize_query_format, get_access_conditions -) -from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags, \ - deserialize_pipeline_response_into_cls -from ._quick_query_helper import BlobQueryReader -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob, _any_conditions) -from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError -from ._download import StorageStreamDownloader -from ._lease import BlobLeaseClient - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.models import BlockList - from ._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. 
The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - if not (container_name and blob_name): - raise ValueError("Please specify a container name and blob name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - path_snapshot, sas_token = parse_query(parsed_url.query) - - self.container_name = container_name - self.blob_name = blob_name - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - # This parameter is used for the hierarchy traversal. Give precedence to credential. 
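The configuration keywords documented in the class docstring above map directly onto constructor arguments. A minimal, hedged sketch (imports shown against the v2020_06_12 namespace removed by this change; the account name, key and size values are placeholders)::

    from azure.multiapi.storagev2.blob.v2020_06_12 import BlobClient

    blob = BlobClient(
        account_url="https://myaccount.blob.core.windows.net",
        container_name="mycontainer",
        blob_name="report.csv",
        credential="<account-key-or-sas-token>",
        api_version="2019-07-07",              # pin the service API version if needed
        max_single_put_size=8 * 1024 * 1024,   # blobs up to 8MB are uploaded in one PUT
        max_block_size=4 * 1024 * 1024,        # chunk size used for larger block blobs
    )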
- self._raw_credential = credential if credential else sas_token - self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) - super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - quote(self.blob_name, safe='~/'), - self._query_str) - - def _encode_source_url(self, source_url): - parsed_source_url = urlparse(source_url) - source_scheme = parsed_source_url.scheme - source_hostname = parsed_source_url.netloc.rstrip('/') - source_path = unquote(parsed_source_url.path) - source_query = parsed_source_url.query - result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))] - if source_query: - result.append(source_query) - return '?'.join(result) - - @classmethod - def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): - # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient - """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name. - - :param str blob_url: - The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type blob_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. If specified, this will override - the snapshot in the url. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - """ - try: - if not blob_url.lower().startswith('http'): - blob_url = "https://" + blob_url - except AttributeError: - raise ValueError("Blob URL must be a string.") - parsed_url = urlparse(blob_url.rstrip('/')) - - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(blob_url)) - - account_path = "" - if ".core." in parsed_url.netloc: - # .core. is indicating non-customized url. Blob name with directory info can also be parsed. - path_blob = parsed_url.path.lstrip('/').split('/', 1) - elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: - path_blob = parsed_url.path.lstrip('/').split('/', 2) - account_path += '/' + path_blob[0] - else: - # for customized url. blob name that has directory info cannot be parsed. 
- path_blob = parsed_url.path.lstrip('/').split('/') - if len(path_blob) > 2: - account_path = "/" + "/".join(path_blob[:-2]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) - if not container_name or not blob_name: - raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.") - - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=path_snapshot, credential=credential, **kwargs - ) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobClient - """Create BlobClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_blob] - :end-before: [END auth_from_connection_string_blob] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=snapshot, credential=credential, **kwargs - ) - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). 
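The two classmethods above accept the same credential forms as the constructor; a short illustrative sketch under placeholder names (not part of the removed sources)::

    # From a full blob URL; a SAS token embedded in the URL is honoured
    # unless an explicit credential is also supplied.
    blob = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/report.csv?<sas-token>"
    )

    # From a connection string; credentials passed here take precedence
    # over any shared key embedded in the string.
    blob = BlobClient.from_connection_string(
        "<connection-string>",
        container_name="mycontainer",
        blob_name="report.csv",
    )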
- :rtype: dict(str, str) - """ - try: - return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_blob_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - encryption_options = { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function, - } - if self.key_encryption_key is not None: - cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) - encryption_options['cek'] = cek - encryption_options['vector'] = iv - encryption_options['data'] = encryption_data - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - overwrite = kwargs.pop('overwrite', False) - max_concurrency = kwargs.pop('max_concurrency', 1) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - kwargs['cpk_info'] = cpk_info - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) - kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) - if content_settings: - kwargs['blob_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['overwrite'] = overwrite - kwargs['headers'] = headers - kwargs['validate_content'] = validate_content - kwargs['blob_settings'] = self._config - kwargs['max_concurrency'] = max_concurrency - kwargs['encryption_options'] = encryption_options - if blob_type == BlobType.BlockBlob: - kwargs['client'] = self._client.block_blob - kwargs['data'] = data - elif blob_type == BlobType.PageBlob: - kwargs['client'] = self._client.page_blob - elif blob_type == BlobType.AppendBlob: - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - 
kwargs['client'] = self._client.append_blob - else: - raise ValueError("Unsupported BlobType: {}".format(blob_type)) - return kwargs - - def _upload_blob_from_url_options(self, source_url, **kwargs): - # type: (...) -> Dict[str, Any] - tier = kwargs.pop('standard_blob_tier', None) - overwrite = kwargs.pop('overwrite', False) - content_settings = kwargs.pop('content_settings', None) - if content_settings: - kwargs['blob_http_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'copy_source_blob_properties': kwargs.pop('include_source_blob_properties', True), - 'source_content_md5': kwargs.pop('source_content_md5', None), - 'copy_source': source_url, - 'modified_access_conditions': get_modify_conditions(kwargs), - 'blob_tags_string': serialize_blob_tags_header(kwargs.pop('tags', None)), - 'cls': return_response_headers, - 'lease_access_conditions': get_access_conditions(kwargs.pop('destination_lease', None)), - 'tier': tier.value if tier else None, - 'source_modified_access_conditions': get_source_conditions(kwargs), - 'cpk_info': cpk_info, - 'cpk_scope_info': get_cpk_scope_info(kwargs) - } - options.update(kwargs) - if not overwrite and not _any_conditions(**options): # pylint: disable=protected-access - options['modified_access_conditions'].if_none_match = '*' - return options - - @distributed_trace - def upload_blob_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Dict[str, Any] - """ - Creates a new Block Blob where the content of the blob is read from a given URL. - The content of an existing blob is overwritten with the new blob. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. - :keyword bool include_source_blob_properties: - Indicates if properties from the source blob should be copied. Defaults to True. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :paramtype tags: dict(str, str) - :keyword bytearray source_content_md5: - Specify the md5 that is used to verify the integrity of the source bytes. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - """ - options = self._upload_blob_from_url_options( - source_url=self._encode_source_url(source_url), - **kwargs) - try: - return self._client.block_blob.put_blob_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_blob( # pylint: disable=too-many-locals - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :keyword lease: - Required if the blob has an active lease. If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
- :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 12 - :caption: Upload a blob to the container. - """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return upload_page_blob(**options) - return upload_append_blob(**options) - - def _download_blob_options(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Service actually uses an end-range inclusive index - - validate_content = kwargs.pop('validate_content', False) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'clients': self._client, - 'config': self._config, - 'start_range': offset, - 'end_range': length, - 'version_id': kwargs.pop('version_id', None), - 'validate_content': validate_content, - 'encryption_options': { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function}, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': kwargs.pop('cls', None) or deserialize_blob_stream, - 'max_concurrency':kwargs.pop('max_concurrency', 1), - 'encoding': kwargs.pop('encoding', None), - 'timeout': kwargs.pop('timeout', None), - 'name': self.blob_name, - 'container': self.container_name} - options.update(kwargs) - return options - - @distributed_trace - def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. 
This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 12 - :caption: Download a blob. 
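As the docstring notes, the returned StorageStreamDownloader is consumed through readall(), readinto() or chunks(). A hedged usage sketch, assuming ``blob`` is a BlobClient as constructed earlier and the file name is a placeholder::

    # Read the whole blob into memory.
    data = blob.download_blob(max_concurrency=2).readall()

    # Or stream into an open, writable handle.
    with open("report.csv", "wb") as handle:
        blob.download_blob().readinto(handle)

    # Or walk the content chunk by chunk; each downloader is single-use,
    # so a fresh download_blob() call is made for each consumption style.
    for chunk in blob.download_blob().chunks():
        pass  # placeholder for caller-defined processing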
- """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - return StorageStreamDownloader(**options) - - def _quick_query_options(self, query_expression, - **kwargs): - # type: (str, **Any) -> Dict[str, Any] - delimiter = '\n' - input_format = kwargs.pop('blob_format', None) - if input_format: - try: - delimiter = input_format.lineterminator - except AttributeError: - try: - delimiter = input_format.delimiter - except AttributeError: - raise ValueError("The Type of blob_format can only be DelimitedTextDialect or DelimitedJsonDialect") - output_format = kwargs.pop('output_format', None) - if output_format: - try: - delimiter = output_format.lineterminator - except AttributeError: - try: - delimiter = output_format.delimiter - except AttributeError: - pass - else: - output_format = input_format - query_request = QueryRequest( - expression=query_expression, - input_serialization=serialize_query_format(input_format), - output_serialization=serialize_query_format(output_format) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo( - encryption_key=cpk.key_value, - encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm - ) - options = { - 'query_request': query_request, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'snapshot': self.snapshot, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized, - } - options.update(kwargs) - return options, delimiter - - @distributed_trace - def query_blob(self, query_expression, **kwargs): - # type: (str, **Any) -> BlobQueryReader - """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions. - This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - :keyword Callable[~azure.storage.blob.BlobQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword blob_format: - Optional. Defines the serialization of the data currently stored in the blob. The default is to - treat the blob data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. - :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the blob. By providing an output format, the blob data will be reformatted - according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. - :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect, ~azure.storage.blob.DelimitedJsonDialect - or list[~azure.storage.blob.ArrowDialect] - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (BlobQueryReader) - :rtype: ~azure.storage.blob.BlobQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on blob/or blob snapshot data by providing simple query expressions. 
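A hedged sketch of the quick-query call described above, assuming a CSV-formatted blob and the DelimitedTextDialect model exported by this package; the query text and dialect options are illustrative placeholders::

    from azure.multiapi.storagev2.blob.v2020_06_12 import DelimitedTextDialect

    errors = []
    reader = blob.query_blob(
        "SELECT * from BlobStorage",                        # sample query expression
        blob_format=DelimitedTextDialect(has_header=True),  # input is CSV with a header row
        output_format=DelimitedTextDialect(delimiter=";"),  # reformat the output on the way back
        on_error=errors.append,                             # collects BlobQueryError objects
    )
    content = reader.readall()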
- """ - errors = kwargs.pop("on_error", None) - error_cls = kwargs.pop("error_cls", BlobQueryError) - encoding = kwargs.pop("encoding", None) - options, delimiter = self._quick_query_options(query_expression, **kwargs) - try: - headers, raw_response_body = self._client.blob.query(**options) - except HttpResponseError as error: - process_storage_error(error) - return BlobQueryReader( - name=self.blob_name, - container=self.container_name, - errors=errors, - record_delimiter=delimiter, - encoding=encoding, - headers=headers, - response=raw_response_body, - error_cls=error_cls) - - @staticmethod - def _generic_delete_blob_options(delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if delete_snapshots: - delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) - options = { - 'timeout': kwargs.pop('timeout', None), - 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs - 'delete_snapshots': delete_snapshots or None, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions} - options.update(kwargs) - return options - - def _delete_blob_options(self, delete_snapshots=False, **kwargs): - # type: (bool, **Any) -> Dict[str, Any] - if self.snapshot and delete_snapshots: - raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") - options = self._generic_delete_blob_options(delete_snapshots, **kwargs) - options['snapshot'] = self.snapshot - options['version_id'] = kwargs.pop('version_id', None) - options['blob_delete_type'] = kwargs.pop('blob_delete_type', None) - return options - - @distributed_trace - def delete_blob(self, delete_snapshots=None, **kwargs): - # type: (str, **Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 12 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - self._client.blob.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def undelete_blob(self, **kwargs): - # type: (**Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 8 - :caption: Undeleting a blob. - """ - try: - self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace() - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :kwarg str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - # Encrypted with CPK - except ResourceExistsError: - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def get_blob_properties(self, **kwargs): - # type: (**Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a blob. 
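A short sketch tying together the existence check, property read, soft delete, and undelete operations described above. It is illustrative only; the connection string and names are placeholders, and the imports assume the upstream ``azure.storage.blob`` layout.

.. code-block:: python

    from azure.storage.blob import BlobClient

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="report.txt")

    if blob_client.exists():
        props = blob_client.get_blob_properties()
        print(props.size, props.last_modified, props.metadata)

        # Soft-delete the blob together with its snapshots, then restore it
        # (undelete only succeeds while a delete retention policy retains it).
        blob_client.delete_blob(delete_snapshots="include")
        blob_client.undelete_blob()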
- """ - # TODO: extract this out as _get_blob_properties_options - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - cls_method = kwargs.pop('cls', None) - if cls_method: - kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) - blob_props = self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - def _set_http_headers_options(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - options = { - 'timeout': kwargs.pop('timeout', None), - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return self._client.blob.set_http_headers(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _set_blob_metadata_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return self._client.blob.set_metadata(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_page_blob_options( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - sequence_number = kwargs.pop('sequence_number', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - if premium_page_blob_tier: - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore - - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_content_length': size, - 'blob_sequence_number': sequence_number, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. 
versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
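A hedged sketch of the property, metadata, and page-blob creation calls documented in this stretch; the size, content settings, and metadata values are illustrative, and the client setup uses placeholder names.

.. code-block:: python

    from azure.storage.blob import BlobClient, ContentSettings

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="disk.vhd")

    # Create an empty 1 MiB page blob (the size must be 512-byte aligned).
    blob_client.create_page_blob(size=1024 * 1024)

    # Replace all content settings in one call and attach user metadata.
    blob_client.set_http_headers(content_settings=ContentSettings(
        content_type="application/octet-stream", cache_control="no-cache"))
    blob_client.set_blob_metadata({"project": "demo", "owner": "team-a"})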
- :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return self._client.page_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.append_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_snapshot_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. 
- - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 8 - :caption: Create a snapshot of the blob. 
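A minimal sketch of creating an append blob and snapshotting it, as described above. The ``append_block`` call is the upstream companion method shown only for context; names and metadata are placeholders.

.. code-block:: python

    from azure.storage.blob import BlobClient

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="events.log")

    blob_client.create_append_blob()
    blob_client.append_block(b"first event\n")  # upstream append API, for context

    # Snapshot the blob; the returned dict carries the snapshot timestamp.
    snapshot = blob_client.create_snapshot(metadata={"label": "nightly"})
    print(snapshot["snapshot"], snapshot["etag"])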
- """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return self._client.blob.create_snapshot(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - if 'source_lease' in kwargs: - source_lease = kwargs.pop('source_lease') - try: - headers['x-ms-source-lease-id'] = source_lease.id # type: str - except AttributeError: - headers['x-ms-source-lease-id'] = source_lease - - tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) - - if kwargs.get('requires_sync'): - headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync')) - - timeout = kwargs.pop('timeout', None) - dest_mod_conditions = get_modify_conditions(kwargs) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'copy_source': source_url, - 'seal_blob': kwargs.pop('seal_destination_blob', None), - 'timeout': timeout, - 'modified_access_conditions': dest_mod_conditions, - 'blob_tags_string': blob_tags_string, - 'headers': headers, - 'cls': return_response_headers, - } - if not incremental_copy: - source_mod_conditions = get_source_conditions(kwargs) - dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) - options['source_modified_access_conditions'] = source_mod_conditions - options['lease_access_conditions'] = dest_access_conditions - options['tier'] = tier.value if tier else None - options.update(kwargs) - return options - - @distributed_trace - def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. 
The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. 
- If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Copy a blob from a URL. 
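A sketch of the asynchronous copy flow described above: start the copy, poll its status through the destination blob's properties, and optionally abort while it is still pending. The source URL and destination names are placeholders; a non-public source would need a SAS token.

.. code-block:: python

    import time
    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="copy-of-source")

    # The source must be public or carry a SAS token (placeholder URL below).
    copy = dest.start_copy_from_url(
        "https://myaccount.blob.core.windows.net/src/source-blob")
    print(copy["copy_id"], copy["copy_status"])

    # Poll until the service finishes the copy.
    while dest.get_blob_properties().copy.status == "pending":
        time.sleep(2)
    # dest.abort_copy(copy["copy_id"])  # only valid while the copy is still pending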
- """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return self._client.page_blob.copy_incremental(**options) - return self._client.blob.start_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _abort_copy_options(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - options = { - 'copy_id': copy_id, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID string, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - self._client.blob.abort_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace - def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - if self.snapshot and kwargs.get('version_id'): - raise ValueError("Snapshot and version_id cannot be set at the same time") - try: - self._client.blob.set_tier( - tier=standard_blob_tier, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def _stage_block_options( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_id = encode_base64(str(block_id)) - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'block_id': block_id, - 'content_length': length, - 'body': data, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
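Before the block-staging calls below, a short sketch of the lease and tiering operations documented just above; the lease duration and tier value are illustrative, and the blob name is a placeholder.

.. code-block:: python

    from azure.storage.blob import BlobClient

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="archive.bin")

    # Take a 30-second lease, pass it to a lease-guarded operation, then release it.
    lease = blob_client.acquire_lease(lease_duration=30)
    blob_client.set_standard_blob_tier("Cool", lease=lease)
    lease.release()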
- :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return self._client.block_blob.stage_block(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _stage_block_from_url_options( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if source_length is not None and source_offset is None: - raise ValueError("Source offset value must not be None if length is set.") - if source_length is not None: - source_length = source_offset + source_length - 1 - block_id = encode_base64(str(block_id)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - range_header = None - if source_offset is not None: - range_header, _ = validate_and_format_range_headers(source_offset, source_length) - - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'block_id': block_id, - 'content_length': 0, - 'source_url': source_url, - 'source_range': range_header, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_from_url_options( - block_id, - source_url=self._encode_source_url(source_url), - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return self._client.block_blob.stage_block_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_block_list_result(self, blocks): - # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] - committed = [] # type: List - uncommitted = [] # type: List - if blocks.committed_blocks: - committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access - if blocks.uncommitted_blocks: - uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access - return committed, uncommitted - - @distributed_trace - def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - def _commit_block_list_options( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - for block in block_list: - try: - if block.state.value == 'committed': - block_lookup.committed.append(encode_base64(str(block.id))) - elif block.state.value == 'uncommitted': - block_lookup.uncommitted.append(encode_base64(str(block.id))) - else: - block_lookup.latest.append(encode_base64(str(block.id))) - except AttributeError: - block_lookup.latest.append(encode_base64(str(block))) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - blob_headers = None - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'blocks': block_lookup, - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'tier': tier.value if tier else None, - 'blob_tags_string': blob_tags_string, - 'headers': headers - } - options.update(kwargs) - return options - - @distributed_trace - def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of Blockblobs. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. 
versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.block_blob.commit_block_list(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. 
- - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def _set_blob_tags_options(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - tags = serialize_blob_tags(tags) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'tags': tags, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return self._client.blob.set_tags(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_blob_tags_options(self, **kwargs): - # type: (**Any) -> Dict[str, str] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'version_id': kwargs.pop('version_id', None), - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized} - return options - - @distributed_trace - def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except HttpResponseError as error: - process_storage_error(error) - - def _get_page_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Reformat to an inclusive range index - page_range, _ = validate_and_format_range_headers( - offset, length, start_range_required=False, end_range_required=False, align_to_page=True - ) - options = { - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': page_range} - if previous_snapshot_diff: - try: - options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore - except AttributeError: - try: - options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore - except TypeError: - options['prevsnapshot'] = previous_snapshot_diff - options.update(kwargs) - return options - - @distributed_trace - def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
- :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = self._client.page_blob.get_page_ranges(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace - def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if sequence_number_action is None: - raise ValueError("A sequence number action must be specified") - options = { - 'sequence_number_action': sequence_number_action, - 'timeout': kwargs.pop('timeout', None), - 'blob_sequence_number': sequence_number, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return self._client.page_blob.update_sequence_number(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _resize_blob_options(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if size is None: - raise ValueError("A content length must be specified for a Page Blob.") - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'blob_content_length': size, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def resize_blob(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return self._client.page_blob.resize(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_page_options( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - if isinstance(page, six.text_type): - page = page.encode(kwargs.pop('encoding', 'UTF-8')) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': page[:length], - 'content_length': length, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. 
The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return self._client.page_blob.upload_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_pages_from_url_options( # type: ignore - self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # TODO: extract the code to a method format_range - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - if source_offset is None or offset % 512 != 0: - raise ValueError("source_offset must be an integer that aligns with 512 page size") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) # should subtract 1 here? - - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - source_content_md5 = kwargs.pop('source_content_md5', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). 
- :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _clear_page_options(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def clear_page(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. 
- :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return self._client.page_blob.clear_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _append_block_options( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if length == 0: - return {} - if isinstance(data, bytes): - data = data[:length] - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - validate_content = kwargs.pop('validate_content', False) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': data, - 'content_length': length, - 'timeout': kwargs.pop('timeout', None), - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return self._client.append_blob.append_block(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _append_block_from_url_options( # type: ignore - self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # If end range is provided, start range must be provided - if source_length is not None and source_offset is None: - raise ValueError("source_offset should also be specified if source_length is specified") - # Format based on whether length is present - source_range = None - if source_length is not None: - end_range = source_offset + source_length - 1 - source_range = 'bytes={0}-{1}'.format(source_offset, end_range) - elif source_offset is not None: - source_range = "bytes={0}-".format(source_offset) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - source_content_md5 = kwargs.pop('source_content_md5', None) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'source_url': copy_source_url, - 'content_length': 0, - 'source_range': source_range, - 'source_content_md5': source_content_md5, - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). 
- :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return self._client.append_blob.append_block_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _seal_append_blob_options(self, **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - append_conditions = None - if appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
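# A minimal, illustrative sketch of the append-from-URL and seal flow described
# above, using the equivalent public azure-storage-blob client that these
# vendored modules mirror; the account URL, credential, names and SAS token
# below are placeholders, and the source URL is assumed to be readable
# (public or carrying a SAS).
from azure.storage.blob import BlobClient

dest = BlobClient(
    account_url="https://<account>.blob.core.windows.net",
    container_name="<container>",
    blob_name="audit.log",
    credential="<account-key-or-sas>",
)
dest.create_append_blob()

source_url = "https://<account>.blob.core.windows.net/<container>/chunk.bin?<sas>"
# Append the first 512 bytes of the source, refusing to grow the blob past 1 MiB.
resp = dest.append_block_from_url(
    source_url,
    source_offset=0,
    source_length=512,
    maxsize_condition=1024 * 1024,
)
print(resp)  # Etag, last modified, append offset, committed block count

# Once no further appends are expected, the blob can be made read-only.
dest.seal_append_blob()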
- :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return self._client.append_blob.seal(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_container_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> ContainerClient - """Get a client to interact with the blob's parent container. - - The container need not already exist. Defaults to current blob's credentials. - - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_client_from_blob_client] - :end-before: [END get_container_client_from_blob_client] - :language: python - :dedent: 8 - :caption: Get container client from blob object. - """ - from ._container_client import ContainerClient - if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2020_06_12/_blob_service_client.py deleted file mode 100644 index 6016a8a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_blob_service_client.py +++ /dev/null @@ -1,732 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._shared.models import LocationMode -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.parser import _to_utc_datetime -from ._shared.response_handlers import return_response_headers, process_storage_error, \ - parse_to_internal_user_delegation_key -from ._generated import AzureBlobStorage -from ._generated.models import StorageServiceProperties, KeyInfo -from ._container_client import ContainerClient -from ._blob_client import BlobClient -from ._models import ContainerPropertiesPaged -from ._list_blobs_helper import FilteredBlobPaged -from ._serialize import get_api_version -from ._deserialize import service_stats_deserialize, service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from ._shared.models import UserDelegationKey - from ._lease import BlobLeaseClient - from ._models import ( - ContainerProperties, - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - FilteredBlob - ) - - -class BlobServiceClient(StorageAccountHostsMixin): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. 
- :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self._query_str, credential = self._format_query_string(sas_token, credential) - super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobServiceClient - """Create BlobServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob service client. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string] - :end-before: [END auth_from_connection_string] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 8 - :caption: Getting account information for the blob service. - """ - try: - return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_service_stats(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. 
Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 8 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 8 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. 
- :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 8 - :caption: Setting service properties for the blob service. - """ - if all(parameter is None for parameter in [ - analytics_logging, hour_metrics, minute_metrics, cors, - target_version, delete_retention_policy, static_website]): - raise ValueError("set_service_properties should be called with at least one parameter") - - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 12 - :caption: Listing the containers in the blob service. 
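# A sketch of lazily iterating containers with their metadata, as described
# above; the prefix and connection string are placeholders.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")
for container in service.list_containers(name_starts_with="app-", include_metadata=True):
    print(container.name, container.metadata)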
- """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> ItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace - def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 12 - :caption: Creating a container in the blob service. 
- """ - container = self.get_container_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace - def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 12 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace - def _rename_container(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str name: - The name of the container to rename. - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.ContainerClient - """ - renamed_container = self.get_container_client(new_name) - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword str new_name: - The new name for the deleted container to be restored to. - If not specified deleted_container_name will be used as the restored container name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - container = self.get_container_client(new_name or deleted_container_name) - try: - container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except HttpResponseError as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 8 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. 
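# A sketch of drilling down from the service client to a container and a blob;
# names are placeholders and neither client requires the target to exist yet.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")
reports = service.get_container_client("reports")
blob = service.get_blob_client(container="reports", blob="2020/summary.csv")
blob.upload_blob(b"col1,col2\n1,2\n", overwrite=True)
print(blob.download_blob().readall())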
- - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 12 - :caption: Getting the blob client to interact with a specific blob. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_container_client.py b/azure/multiapi/storagev2/blob/v2020_06_12/_container_client.py deleted file mode 100644 index d2caf7a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_container_client.py +++ /dev/null @@ -1,1551 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import HttpRequest - -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from ._generated import AzureBlobStorage -from ._generated.models import SignedIdentifier -from ._deserialize import deserialize_container_properties -from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobType) -from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged -from ._lease import BlobLeaseClient -from ._blob_client import BlobClient - -if TYPE_CHECKING: - from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports - from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports - from datetime import datetime - from ._models import ( # pylint: disable=unused-import - PublicAccess, - AccessPolicy, - ContentSettings, - StandardBlobTier, - PremiumPageBlobTier) - - -def _get_blob_name(blob): - """Return the blob name. - - :param blob: A blob string or BlobProperties - :rtype: str - """ - try: - return blob.get('name') - except AttributeError: - return blob - - -class ContainerClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. 
- Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 8 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not container_name: - raise ValueError("Please specify a container name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self.container_name = container_name - # This parameter is used for the hierarchy traversal. Give precedence to credential. 
- self._raw_credential = credential if credential else sas_token - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - self._query_str) - - @classmethod - def from_container_url(cls, container_url, credential=None, **kwargs): - # type: (str, Optional[Any], Any) -> ContainerClient - """Create ContainerClient from a container url. - - :param str container_url: - The full endpoint URL to the Container, including SAS token if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type container_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - """ - try: - if not container_url.lower().startswith('http'): - container_url = "https://" + container_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(container_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(container_url)) - - container_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(container_path) > 1: - account_path = "/" + "/".join(container_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name = unquote(container_path[-1]) - if not container_name: - raise ValueError("Invalid URL. Please provide a URL with a valid container name") - return cls(account_url, container_name=container_name, credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ContainerClient - """Create ContainerClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: - The container name for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. 
The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_container] - :end-before: [END auth_from_connection_string_container] - :language: python - :dedent: 8 - :caption: Creating the ContainerClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, credential=credential, **kwargs) - - @distributed_trace - def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 12 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - timeout = kwargs.pop('timeout', None) - headers.update(add_metadata_headers(metadata)) # type: ignore - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _rename_container(self, new_name, **kwargs): - # type: (str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
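# A sketch of the connection-string constructor described above, followed by
# creating the container it points at; all values are placeholders.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string(
    conn_str="<connection-string>",
    container_name="reports",
)
container.create_container(metadata={"Category": "test"})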
- :rtype: ~azure.storage.blob.ContainerClient - """ - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container = ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 12 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. 
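# A sketch of taking a short lease and deleting the container under it, so the
# delete succeeds only while the lease is held; values are placeholders and the
# container is assumed to exist.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", container_name="reports")
lease = container.acquire_lease(lease_duration=15)
container.delete_container(lease=lease)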
- - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_container_properties(self, **kwargs): - # type: (Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a container exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - self._client.container.get_properties(**kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the container. 
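# A sketch of replacing container metadata; per the docstring this overwrites
# whatever metadata is already attached to the container. Values are placeholders.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", container_name="reports")
if container.exists():
    result = container.set_container_metadata(metadata={"category": "test"})
    print(result["etag"], result["last_modified"])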
- """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> BlobServiceClient - """Get a client to interact with the container's parent service account. - - Defaults to current container's credentials. - - :returns: A BlobServiceClient. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_client_from_container_client] - :end-before: [END get_blob_service_client_from_container_client] - :language: python - :dedent: 8 - :caption: Get blob service client from container object. - """ - from ._blob_service_client import BlobServiceClient - if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return BlobServiceClient( - "{}://{}".format(self.scheme, self.primary_hostname), - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, - _pipeline=_pipeline) - - @distributed_trace - def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 12 - :caption: Getting the access policy on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 12 - :caption: Setting access policy on the container. - """ - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - lease = kwargs.pop('lease', None) - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 8 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace - def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 8 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace - def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - blob_client.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace - def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return blob_client.download_blob(offset=offset, length=length, **kwargs) - - def _generate_delete_blobs_subrequest_options( - self, snapshot=None, - delete_snapshots=None, - lease_access_conditions=None, - modified_access_conditions=None, - **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. - """ - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct parameters - timeout = kwargs.pop('timeout', None) - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access - "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access - "lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = 
self._client._serialize.header( # pylint: disable=protected-access - "if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_delete_blobs_options(self, - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - delete_snapshots = kwargs.pop('delete_snapshots', None) - if_modified_since = kwargs.pop('if_modified_since', None) - if_unmodified_since = kwargs.pop('if_unmodified_since', None) - if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "", - 'path': self.container_name, - 'restype': 'restype=container&' - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - snapshot=blob.get('snapshot'), - delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), - lease=blob.get('lease_id'), - if_modified_since=if_modified_since or blob.get('if_modified_since'), - if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), - etag=blob.get('etag'), - if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), - match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') - else None, - timeout=blob.get('timeout'), - ) - except AttributeError: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - delete_snapshots=delete_snapshots, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_tags_match_condition=if_tags_match_condition - ) - - query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) - - req = HttpRequest( - "DELETE", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def delete_blobs(self, *blobs, **kwargs): - # type: (...) -> Iterator[HttpResponse] - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapshots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - whether the blob has been modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blob's snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 8 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def _generate_set_tiers_subrequest_options( - self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. 
- """ - if not tier: - raise ValueError("A blob tier must be specified") - if snapshot and version_id: - raise ValueError("Snapshot and version_id cannot be set at the same time") - if_tags = kwargs.pop('if_tags', None) - - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - timeout = kwargs.pop('timeout', None) - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if version_id is not None: - query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access - "rehydrate_priority", rehydrate_priority, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_set_tiers_options(self, - blob_tier, # type: Optional[Union[str, StandardBlobTier, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - rehydrate_priority = kwargs.pop('rehydrate_priority', None) - if_tags = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "", - 'path': self.container_name, - 'restype': 'restype=container&' - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - tier = blob_tier or blob.get('blob_tier') - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - tier=tier, - snapshot=blob.get('snapshot'), - version_id=blob.get('version_id'), - rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), - lease_access_conditions=blob.get('lease_id'), - if_tags=if_tags or blob.get('if_tags_match_condition'), - timeout=timeout or blob.get('timeout') - ) - except AttributeError: - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) - - req = HttpRequest( - "PUT", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def set_standard_blob_tier_blobs( - self, - 
standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set a different tier on different blobs, please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - snapshot: - key: "snapshot", value type: str - version id: - key: "version_id", value type: str - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - @distributed_trace - def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set a different tier on different blobs, please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. 
- - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[str, BlobProperties] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 8 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_deserialize.py b/azure/multiapi/storagev2/blob/v2020_06_12/_deserialize.py deleted file mode 100644 index dff3953..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_deserialize.py +++ /dev/null @@ -1,166 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties -from ._shared.models import get_enum_value - -from ._shared.response_handlers import deserialize_metadata -from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ - StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule - -if TYPE_CHECKING: - from ._generated.models import PageList - - -def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers): - try: - deserialized_response = response.http_response - except AttributeError: - deserialized_response = response - return cls_method(deserialized_response, obj, headers) - - -def deserialize_blob_properties(response, obj, headers): - blob_properties = BlobProperties( - metadata=deserialize_metadata(response, obj, headers), - object_replication_source_properties=deserialize_ors_policies(response.http_response.headers), - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - blob_properties.content_settings.content_md5 = None - return blob_properties - - -def deserialize_ors_policies(policy_dictionary): - - if policy_dictionary is None: - return None - # For source blobs (blobs that have policy ids and rule ids applied to them), - # the header will be formatted as "x-ms-or-_: {Complete, Failed}". - # The value of this header is the status of the replication. - or_policy_status_headers = {key: val for key, val in policy_dictionary.items() - if 'or-' in key and key != 'x-ms-or-policy-id'} - - parsed_result = {} - - for key, val in or_policy_status_headers.items(): - # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule - policy_and_rule_ids = key.split('or-')[1].split('_') - policy_id = policy_and_rule_ids[0] - rule_id = policy_and_rule_ids[1] - - # If we are seeing this policy for the first time, create a new list to store rule_id -> result - parsed_result[policy_id] = parsed_result.get(policy_id) or list() - parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val)) - - result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()] - - return result_list - - -def deserialize_blob_stream(response, obj, headers): - blob_properties = deserialize_blob_properties(response, obj, headers) - obj.properties = blob_properties - return response.http_response.location_mode, obj - - -def deserialize_container_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - container_properties = ContainerProperties( - metadata=metadata, - **headers - ) - return container_properties - - -def get_page_ranges_result(ranges): - # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - page_range = [] # type: ignore - clear_range = [] # type: List - if ranges.page_range: - page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore - if ranges.clear_range: - clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range] - return page_range, clear_range # type: ignore - - -def service_stats_deserialize(generated): - """Deserialize a ServiceStats objects into a dict. 
- """ - return { - 'geo_replication': { - 'status': generated.geo_replication.status, - 'last_sync_time': generated.geo_replication.last_sync_time, - } - } - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. - """ - return { - 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'target_version': generated.default_service_version, # pylint: disable=protected-access - 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access - 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access - } - - -def get_blob_properties_from_generated_code(generated): - blob = BlobProperties() - blob.name = generated.name - blob_type = get_enum_value(generated.properties.blob_type) - blob.blob_type = BlobType(blob_type) if blob_type else None - blob.etag = generated.properties.etag - blob.deleted = generated.deleted - blob.snapshot = generated.snapshot - blob.is_append_blob_sealed = generated.properties.is_sealed - blob.metadata = generated.metadata.additional_properties if generated.metadata else {} - blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None - blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access - blob.last_modified = generated.properties.last_modified - blob.creation_time = generated.properties.creation_time - blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access - blob.size = generated.properties.content_length - blob.page_blob_sequence_number = generated.properties.blob_sequence_number - blob.server_encrypted = generated.properties.server_encrypted - blob.encryption_scope = generated.properties.encryption_scope - blob.deleted_time = generated.properties.deleted_time - blob.remaining_retention_days = generated.properties.remaining_retention_days - blob.blob_tier = generated.properties.access_tier - blob.rehydrate_priority = generated.properties.rehydrate_priority - blob.blob_tier_inferred = generated.properties.access_tier_inferred - blob.archive_status = generated.properties.archive_status - blob.blob_tier_change_time = generated.properties.access_tier_change_time - blob.version_id = generated.version_id - blob.is_current_version = generated.is_current_version - blob.tag_count = generated.properties.tag_count - blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access - blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) - blob.last_accessed_on = generated.properties.last_accessed_on - return blob - - -def parse_tags(generated_tags): - # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] - """Deserialize a list of BlobTag objects into a dict. 
- """ - if generated_tags: - tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} - return tag_dict - return None diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_download.py b/azure/multiapi/storagev2/blob/v2020_06_12/_download.py deleted file mode 100644 index d17a211..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_download.py +++ /dev/null @@ -1,638 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -import threading -import time - -import warnings -from io import BytesIO -from typing import Iterator - -import requests -from azure.core.exceptions import HttpResponseError, ServiceResponseError - -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range -from ._deserialize import get_page_ranges_result - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - - content = b"".join(list(data)) - - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - non_empty_ranges=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - self.non_empty_ranges = non_empty_ranges - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - 
self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _do_optimize(self, given_range_start, given_range_end): - # If we have no page range list stored, then assume there's data everywhere for that page blob - # or it's a block blob or append blob - if self.non_empty_ranges is None: - return False - - for source_range in self.non_empty_ranges: - # Case 1: As the range list is sorted, if we've reached such a source_range - # we've checked all the appropriate source_range already and haven't found any overlapping. - # so the given range doesn't have any data and download optimization could be applied. - # given range: | | - # source range: | | - if given_range_end < source_range['start']: # pylint:disable=no-else-return - return True - # Case 2: the given range comes after source_range, continue checking. - # given range: | | - # source range: | | - elif source_range['end'] < given_range_start: - pass - # Case 3: source_range and given range overlap somehow, no need to optimize. - else: - return False - # Went through all src_ranges, but nothing overlapped. Optimization will be applied. - return True - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - - retry_active = True - retry_total = 3 - while retry_active: - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - try: - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - retry_active = False - except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - time.sleep(1) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get("modified_access_conditions"): - self.request_options["modified_access_conditions"].if_match = response.properties.etag - - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += self._iter_downloader.yield_chunk(chunk) - except StopIteration as e: - self._complete = True - if self._current_content: - return self._current_content - raise e - - # the current content from the first get is still there but smaller than chunk size - # therefore we want to make sure its also included - return self._get_chunk_data() - - next = __next__ # Python 2 compatibility. - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the blob. 
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - retry_active = True - retry_total = 3 - while retry_active: - try: - location_mode, response = self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - try: - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - retry_active = False - except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - time.sleep(1) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - # according to the REST API documentation: - # in a highly fragmented page blob with a large number of writes, - # a Get Page Ranges request can fail due to an internal server timeout. - # thus, if the page blob is not sparse, it's ok for it to fail - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overriden by the user by specifying '*' - if self._request_options.get("modified_access_conditions"): - if not self._request_options["modified_access_conditions"].if_match: - self._request_options["modified_access_conditions"].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob_in_chunk] - :end-before: [END download_a_blob_in_chunk] - :language: python - :dedent: 12 - :caption: Download a blob using chunks(). 
- """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." 
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor: - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/__init__.py deleted file mode 100644 index cc760e7..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_azure_blob_storage.py deleted file mode 100644 index dff7e12..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_azure_blob_storage.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -from ._configuration import AzureBlobStorageConfiguration -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from . import models - - -class AzureBlobStorage(object): - """AzureBlobStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.blob.operations.ServiceOperations - :ivar container: ContainerOperations operations - :vartype container: azure.storage.blob.operations.ContainerOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.blob.operations.DirectoryOperations - :ivar blob: BlobOperations operations - :vartype blob: azure.storage.blob.operations.BlobOperations - :ivar page_blob: PageBlobOperations operations - :vartype page_blob: azure.storage.blob.operations.PageBlobOperations - :ivar append_blob: AppendBlobOperations operations - :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations - :ivar block_blob: BlockBlobOperations operations - :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...)
-> None - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureBlobStorage - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_configuration.py deleted file mode 100644 index fb74f9e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_configuration.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-06-12" - kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...)
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/__init__.py deleted file mode 100644 index 12cfcf6..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_azure_blob_storage.py deleted file mode 100644 index b537034..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_azure_blob_storage.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from msrest import Deserializer, Serializer - -from ._configuration import AzureBlobStorageConfiguration -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from .. import models - - -class AzureBlobStorage(object): - """AzureBlobStorage. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.blob.aio.operations.ServiceOperations - :ivar container: ContainerOperations operations - :vartype container: azure.storage.blob.aio.operations.ContainerOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.blob.aio.operations.DirectoryOperations - :ivar blob: BlobOperations operations - :vartype blob: azure.storage.blob.aio.operations.BlobOperations - :ivar page_blob: PageBlobOperations operations - :vartype page_blob: azure.storage.blob.aio.operations.PageBlobOperations - :ivar append_blob: AppendBlobOperations operations - :vartype append_blob: azure.storage.blob.aio.operations.AppendBlobOperations - :ivar block_blob: BlockBlobOperations operations - :vartype block_blob: azure.storage.blob.aio.operations.BlockBlobOperations - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureBlobStorage": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_configuration.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_configuration.py deleted file mode 100644 index 1924efa..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_configuration.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage.
- - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-06-12" - kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/__init__.py deleted file mode 100644 index 62f85c9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_append_blob_operations.py deleted file mode 100644 index 934b720..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_append_blob_operations.py +++ /dev/null @@ -1,700 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class AppendBlobOperations: - """AppendBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - content_length: int, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "AppendBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - 
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def append_block( - self, - content_length: int, - body: IO, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Append Block operation commits a new block of data to the end of an existing append blob. - The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _max_size = None - _append_position = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "appendblock" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.append_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', 
response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def append_block_from_url( - self, - source_url: str, - content_length: int, - source_range: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Append Block operation commits a new block of data to the end of an existing append blob - where the contents are read from a source url. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _max_size = None - _append_position = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "appendblock" - accept = "application/xml" - - # Construct URL - url = self.append_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - 
if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
-        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
-        response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
-        response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
-        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
-        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
-        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
-        response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset'))
-        response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count'))
-        response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
-        response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
-        response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
-
-        if cls:
-            return cls(pipeline_response, None, response_headers)
-
-    append_block_from_url.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
-
-    async def seal(
-        self,
-        timeout: Optional[int] = None,
-        request_id_parameter: Optional[str] = None,
-        lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
-        modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
-        append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None,
-        **kwargs
-    ) -> None:
-        """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on
-        version 2019-12-12 version or later.
-
-        :param timeout: The timeout parameter is expressed in seconds. For more information, see
-         :code:`Setting Timeouts for Blob Service Operations.`.
-        :type timeout: int
-        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
-         limit that is recorded in the analytics logs when storage analytics logging is enabled.
-        :type request_id_parameter: str
-        :param lease_access_conditions: Parameter group.
-        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
-        :param modified_access_conditions: Parameter group.
-        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
-        :param append_position_access_conditions: Parameter group.
-        :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions
-        :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: None, or the result of cls(response)
-        :rtype: None
-        :raises: ~azure.core.exceptions.HttpResponseError
-        """
-        cls = kwargs.pop('cls', None)  # type: ClsType[None]
-        error_map = {
-            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
-        }
-        error_map.update(kwargs.pop('error_map', {}))
-
-        _lease_id = None
-        _if_modified_since = None
-        _if_unmodified_since = None
-        _if_match = None
-        _if_none_match = None
-        _append_position = None
-        if append_position_access_conditions is not None:
-            _append_position = append_position_access_conditions.append_position
-        if lease_access_conditions is not None:
-            _lease_id = lease_access_conditions.lease_id
-        if modified_access_conditions is not None:
-            _if_modified_since = modified_access_conditions.if_modified_since
-            _if_unmodified_since = modified_access_conditions.if_unmodified_since
-            _if_match = modified_access_conditions.if_match
-            _if_none_match = modified_access_conditions.if_none_match
-        comp = "seal"
-        accept = "application/xml"
-
-        # Construct URL
-        url = self.seal.metadata['url']  # type: ignore
-        path_format_arguments = {
-            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
-        }
-        url = self._client.format_url(url, **path_format_arguments)
-
-        # Construct parameters
-        query_parameters = {}  # type: Dict[str, Any]
-        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
-        if timeout is not None:
-            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
-
-        # Construct headers
-        header_parameters = {}  # type: Dict[str, Any]
-        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
-        if request_id_parameter is not None:
-            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
-        if _lease_id is not None:
-            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
-        if _if_modified_since is not None:
-            header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123')
-        if _if_unmodified_since is not None:
-            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
-        if _append_position is not None:
-            header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long')
-        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
-
-        request = self._client.put(url, query_parameters, header_parameters)
-        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
-        response = pipeline_response.http_response
-
-        if response.status_code not in [200]:
-            map_error(status_code=response.status_code, response=response, error_map=error_map)
-            error = self._deserialize(_models.StorageError, response)
-            raise HttpResponseError(response=response, model=error)
-
-        response_headers = {}
-        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
-        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
-        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
-        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
-        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
-        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
-        response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed'))
-
-        if cls:
-            return cls(pipeline_response, None, response_headers)
-
-    seal.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_blob_operations.py
deleted file mode 100644
index 0f296df..0000000
--- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_blob_operations.py
+++ /dev/null
@@ -1,3122 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-import datetime
-from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union
-import warnings
-
-from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
-from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
-
-from ... import models as _models
-
-T = TypeVar('T')
-ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
-
-class BlobOperations:
-    """BlobOperations async operations.
-
-    You should not instantiate this class directly. Instead, you should create a Client instance that
-    instantiates it for you and attaches it as an attribute.
-
-    :ivar models: Alias to model classes used in this operation group.
-    :type models: ~azure.storage.blob.models
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def download( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - range_get_content_md5: Optional[bool] = None, - range_get_content_crc64: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> IO: - """The Download operation reads or downloads a blob from the system, including its metadata and - properties. You can also call Download to read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param range_get_content_md5: When set to true and specified together with the Range, the - service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified together with the Range, the - service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 - MB in size. - :type range_get_content_crc64: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - 
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_properties( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and 
- system properties for the blob. It does not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] 
= self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) - response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) - response_headers['x-ms-lease-duration']=self._deserialize('str', 
response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) - response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - 
get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def delete( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, - request_id_parameter: Optional[str] = None, - blob_delete_type: Optional[str] = "Permanent", - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - permanently removed from the storage account. If the storage account's soft delete feature is - enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for the number of days - specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob- - Service-Properties.md). After the specified number of days has passed, the blob's data is - permanently removed from the storage account. Note that you continue to be charged for the - soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify - the "include=deleted" query parameter to discover which blobs and snapshots have been soft - deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other - operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code - of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the - following two options: include: Delete the base blob and all of its snapshots. only: Delete - only the blob's snapshots and not the blob itself. - :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to - permanently delete a blob if blob soft delete is enabled. - :type blob_delete_type: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if blob_delete_type is not None: - query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in 
[202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_access_control( - self, - timeout: Optional[int] = None, - upn: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def rename( - self, - rename_source: str, - timeout: Optional[int] = None, - path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - source_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Rename a blob/file. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. 
Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def undelete( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, 
response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_expiry( - self, - expiry_options: Union[str, "_models.BlobExpiryOptions"], - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - expires_on: Optional[str] = None, - **kwargs - ) -> None: - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. - :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/xml" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_http_headers( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
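Hedged usage sketch: this generated comp=properties call backs what azure-storage-blob v12 exposes as BlobClient.set_http_headers; the vendored blob packages here are assumed to offer the same method. The connection string and blob names are placeholders.

    # Illustrative only: set Content-Type and Cache-Control system properties on a blob.
    from azure.storage.blob import BlobClient, ContentSettings

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="reports", blob_name="daily.csv"
    )
    blob.set_http_headers(
        content_settings=ContentSettings(content_type="text/csv", cache_control="max-age=3600")
    )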
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_md5 = None - _blob_content_encoding = None - _blob_content_language = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _blob_content_disposition = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_disposition = blob_http_headers.blob_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or - more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - 
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - 
header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
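Hedged usage sketch: the acquire/renew/release lease actions generated in this module are surfaced in azure-storage-blob v12 as BlobClient.acquire_lease and the BlobLeaseClient it returns; the vendored blob packages are assumed to match. Names below are placeholders.

    # Illustrative only: a typical lease lifecycle around a guarded update.
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="data", blob_name="large.bin"
    )
    lease = blob.acquire_lease(lease_duration=15)        # x-ms-lease-action: acquire
    blob.set_blob_metadata({"locked": "true"}, lease=lease)
    lease.renew()                                        # x-ms-lease-action: renew
    lease.release()                                      # x-ms-lease-action: release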
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
_if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - proposed_lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
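Hedged usage sketch: changing a lease ID maps to BlobLeaseClient.change in the public v12 surface (assumed to be mirrored by the vendored packages); the current lease ID and names below are placeholders.

    # Illustrative only: rotate an active lease to a caller-chosen GUID.
    import uuid
    from azure.storage.blob import BlobClient, BlobLeaseClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="data", blob_name="large.bin"
    )
    lease = BlobLeaseClient(blob, lease_id="<current-lease-id>")
    lease.change(proposed_lease_id=str(uuid.uuid4()))    # x-ms-lease-action: change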
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
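Hedged usage sketch: breaking a lease without holding its ID maps to BlobLeaseClient.break_lease in the public v12 clients (assumed to match the vendored copies); names are placeholders.

    # Illustrative only: force-break whatever lease is currently held on the blob.
    from azure.storage.blob import BlobClient, BlobLeaseClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="data", blob_name="large.bin"
    )
    remaining = BlobLeaseClient(blob).break_lease(lease_break_period=0)  # seconds left on the lease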
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def create_snapshot( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if 
_lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def start_copy_from_url( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - seal_blob: Optional[bool] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Start Copy From URL operation copies a blob or an internet resource to a new blob. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. 
If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. Service version - 2019-12-12 and newer. - :type seal_blob: bool - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - accept = "application/xml" - - # Construct URL - url = self.start_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - 
query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
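# A minimal, hypothetical usage sketch for the generated `start_copy_from_url`
# coroutine above. `blob_ops` is assumed to be an already-authenticated instance
# of this async operations class; only parameters that appear in the signature
# above are used, and the `cls` callback (documented in the docstring) is used
# to capture the raw response headers.
async def _example_start_copy_from_url(blob_ops, source_url: str) -> str:
    headers = await blob_ops.start_copy_from_url(
        copy_source=source_url,   # public or SAS-authenticated source URL
        tier="Cool",              # optional access tier for the destination blob
        timeout=30,
        cls=lambda pipeline_response, deserialized, response_headers: response_headers,
    )
    # The asynchronous copy is identified by the x-ms-copy-id response header;
    # it can later be monitored or cancelled with abort_copy_from_url.
    return headers["x-ms-copy-id"]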
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def copy_from_url( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - blob_tags_string: Optional[str] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not - return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - x_ms_requires_sync = "true" - accept = "application/xml" - - # Construct URL - url = self.copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _if_modified_since is not None: - 
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def abort_copy_from_url( - self, - copy_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Abort Copy From URL operation 
aborts a pending Copy From URL operation, and leaves a - destination blob with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_tier( - self, - tier: Union[str, "_models.AccessTierRequired"], - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a - premium storage account and on a block blob in a blob storage account (locally redundant - storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not - update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tier" - accept = "application/xml" - - # Construct URL - url = self.set_tier.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if response.status_code == 202: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
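# A minimal, hypothetical usage sketch for the generated `set_tier` coroutine
# above. `blob_ops` is assumed to be an already-configured instance of this
# async operations class; the tier and rehydrate-priority values are passed as
# plain strings, which the docstring permits.
async def _example_set_tier(blob_ops) -> None:
    # Move the blob to the Archive tier and request high-priority rehydration
    # if it is later brought back online.
    await blob_ops.set_tier(tier="Archive", rehydrate_priority="High", timeout=30)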
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_account_info( - self, - **kwargs - ) -> None: - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def query( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - query_request: Optional["_models.QueryRequest"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> IO: - """The Query operation enables users to select/project on blob data by providing simple query - expressions. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param query_request: the query request. - :type query_request: ~azure.storage.blob.models.QueryRequest - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "query" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.query.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - 
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - 
response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - 
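# A minimal, hypothetical usage sketch for the generated `query` coroutine
# above. `blob_ops` is assumed to be an already-configured instance of this
# async operations class and `_models` stands for the generated models module;
# the exact QueryRequest constructor arguments are an assumption based on its
# documented role as "the query request".
async def _example_query(blob_ops, _models) -> bytes:
    request = _models.QueryRequest(expression="SELECT * from BlobStorage")
    # `query` streams the filtered blob content back, so the result is consumed
    # chunk by chunk from the downloader returned by stream_download.
    downloader = await blob_ops.query(query_request=request, timeout=60)
    return b"".join([chunk async for chunk in downloader])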
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_tags( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> "_models.BlobTags": - """The Get Tags operation enables users to get the tags associated with a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. 
- :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlobTags, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - accept = "application/xml" - - # Construct URL - url = self.get_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
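# A minimal, hypothetical usage sketch for the generated `get_tags` coroutine
# above. `blob_ops` is assumed to be an already-configured instance of this
# async operations class; the BlobTags / BlobTag attribute names used below are
# assumed from the generated models.
async def _example_get_tags(blob_ops) -> dict:
    tags = await blob_ops.get_tags(timeout=30)
    # Flatten the deserialized BlobTags model into an ordinary dict.
    return {tag.key: tag.value for tag in tags.blob_tag_set}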
deserialized = self._deserialize('BlobTags', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_tags( - self, - timeout: Optional[int] = None, - version_id: Optional[str] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - tags: Optional["_models.BlobTags"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param tags: Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
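As with Get Tags, the Set Tags operation is usually invoked through the public client layer. A sketch under the same assumptions (placeholder connection string; ``set_blob_tags`` on azure-storage-blob 12.4+), where the call replaces the blob's entire tag set:

.. code-block:: python

    import asyncio

    from azure.storage.blob.aio import BlobClient

    CONN_STR = "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net"

    async def tag_blob() -> None:
        async with BlobClient.from_connection_string(
            CONN_STR, container_name="mycontainer", blob_name="myblob.txt"
        ) as blob:
            # set_blob_tags maps to the generated set_tags operation above
            # and overwrites any tags previously set on the blob.
            await blob.set_blob_tags({"project": "alpha", "status": "draft"})

    if __name__ == "__main__":
        asyncio.run(tag_blob())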
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_block_blob_operations.py deleted file mode 100644 index e90eacd..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_block_blob_operations.py +++ /dev/null @@ -1,1088 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class BlockBlobOperations: - """BlockBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def upload( - self, - content_length: int, - body: IO, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Upload Block Blob operation updates the content of an existing block blob. Updating an - existing block blob overwrites any existing metadata on the blob. Partial updates are not - supported with Put Blob; the content of the existing blob is overwritten with the content of - the new blob. 
To perform a partial update of the content of a block blob, use the Put Block - List operation. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "BlockBlob" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = 
self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def put_blob_from_url( - self, - content_length: int, - copy_source: str, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - blob_tags_string: Optional[str] = None, - copy_source_blob_properties: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are - read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial - updates are not supported with Put Blob from URL; the content of an existing blob is - overwritten with the content of the new blob. To perform partial updates to a block blob’s - contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. - - :param content_length: The length of the request. - :type content_length: long - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. 
- If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param copy_source_blob_properties: Optional, default is true. Indicates if properties from - the source blob should be copied. - :type copy_source_blob_properties: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - blob_type = "BlockBlob" - accept = "application/xml" - - # Construct URL - url = self.put_blob_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - 
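Put Blob from URL, documented above, creates the destination blob from a source URL in one server-side call. A sketch assuming the public client's ``upload_blob_from_url`` (available in recent azure-storage-blob releases, roughly 12.8+; the URLs and names below are placeholders):

.. code-block:: python

    import asyncio

    from azure.storage.blob.aio import BlobClient

    CONN_STR = "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net"
    # The source must be publicly readable or carry a SAS token (placeholder).
    SOURCE_URL = "https://otheraccount.blob.core.windows.net/src/source.txt?<sas>"

    async def copy_via_put_blob_from_url() -> None:
        async with BlobClient.from_connection_string(
            CONN_STR, container_name="mycontainer", blob_name="copied.txt"
        ) as blob:
            # Synchronous server-side copy: the destination block blob is
            # created from the source URL without streaming through the client.
            await blob.upload_blob_from_url(SOURCE_URL, overwrite=True)

    if __name__ == "__main__":
        asyncio.run(copy_via_put_blob_from_url())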
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if copy_source_blob_properties is not None: - header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def stage_block( - self, - block_id: str, - content_length: int, - body: IO, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - **kwargs - ) -> None: - """The Stage Block operation creates a new block to be committed as part of a blob. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. 
For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "block" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.stage_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def stage_block_from_url( - self, - block_id: str, - content_length: int, - source_url: str, - source_range: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Stage Block operation creates a 
new block to be committed as part of a blob where the - contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "block" - accept = "application/xml" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = 
self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - 
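Stage Block from URL, described above, lets the service read a byte range from a source URL into an uncommitted block. A hedged sketch through the public client (placeholder connection string and source URL), pairing it with a commit so the staged block becomes visible:

.. code-block:: python

    import asyncio
    import base64

    from azure.storage.blob import BlobBlock
    from azure.storage.blob.aio import BlobClient

    CONN_STR = "DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=core.windows.net"
    SOURCE_URL = "https://otheraccount.blob.core.windows.net/src/big.bin?<sas>"

    async def stage_from_url() -> None:
        async with BlobClient.from_connection_string(
            CONN_STR, container_name="mycontainer", blob_name="big-copy.bin"
        ) as blob:
            # Block IDs must be Base64 strings of equal length (<= 64 bytes raw).
            block_id = base64.b64encode(b"block-000000").decode()
            # Ask the service to pull the first 4 MiB of the source URL
            # directly into an uncommitted block (Put Block from URL).
            await blob.stage_block_from_url(
                block_id,
                SOURCE_URL,
                source_offset=0,
                source_length=4 * 1024 * 1024,
            )
            # The block only becomes part of the blob once it is committed.
            await blob.commit_block_list([BlobBlock(block_id=block_id)])

    if __name__ == "__main__":
        asyncio.run(stage_from_url())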
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def commit_block_list( - self, - blocks: "_models.BlockLookupList", - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Commit Block List operation writes a blob by specifying the list of block IDs that make up - the blob. In order to be written as part of a blob, a block must have been successfully written - to the server in a prior Put Block operation. You can call Put Block List to update a blob by - uploading only those blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from the committed block list - or from the uncommitted block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.commit_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - 
query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_block_list( - self, - snapshot: Optional[str] = None, - list_type: Union[str, "_models.BlockListType"] = "committed", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> "_models.BlockList": - """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a - block blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param list_type: Specifies whether to return the list of committed blocks, the list of - uncommitted blocks, or both lists together. 
- :type list_type: str or ~azure.storage.blob.models.BlockListType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlockList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - accept = "application/xml" - - # Construct URL - url = self.get_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlockList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_container_operations.py deleted file mode 100644 index 904fea3..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_container_operations.py +++ /dev/null @@ -1,1631 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ContainerOperations: - """ContainerOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - access: Optional[Union[str, "_models.PublicAccessType"]] = None, - request_id_parameter: Optional[str] = None, - container_cpk_scope_info: Optional["_models.ContainerCpkScopeInfo"] = None, - **kwargs - ) -> None: - """creates a new container under the specified account. If the container with the same name - already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. 
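# --- Usage sketch (not part of the generated client) --------------------------
# commit_block_list / get_block_list above are normally driven through the
# public async BlobClient rather than called directly. A minimal sketch,
# assuming azure-storage-blob (the surface this package vendors per API
# version), a connection string in AZURE_STORAGE_CONNECTION_STRING, and
# placeholder container/blob names.
import os

from azure.storage.blob import BlobBlock
from azure.storage.blob.aio import BlobClient


async def commit_and_list_blocks() -> None:
    blob = BlobClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"],
        container_name="mycontainer",
        blob_name="staged.txt",
    )
    async with blob:
        # Stage two uncommitted blocks, then commit them in order.
        await blob.stage_block(block_id="block-001", data=b"hello ")
        await blob.stage_block(block_id="block-002", data=b"world")
        await blob.commit_block_list([BlobBlock("block-001"), BlobBlock("block-002")])

        # get_block_list returns (committed_blocks, uncommitted_blocks).
        committed, uncommitted = await blob.get_block_list("all")
        print([b.id for b in committed], len(uncommitted))
# run with e.g. asyncio.run(commit_and_list_blocks())
# -------------------------------------------------------------------------------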
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_cpk_scope_info: Parameter group. - :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _default_encryption_scope = None - _prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - _default_encryption_scope = container_cpk_scope_info.default_encryption_scope - _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') - if _prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """returns all user-defined metadata and system properties for the specified container. The data - returned does not include the container's list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
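# --- Usage sketch (not part of the generated client) --------------------------
# ContainerOperations.create above backs ContainerClient.create_container in the
# public SDK. A minimal sketch; the connection string, container name, metadata
# and public-access level are placeholders.
import os

from azure.storage.blob.aio import ContainerClient


async def create_container_example() -> None:
    container = ContainerClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="mycontainer"
    )
    async with container:
        # Raises ResourceExistsError if a container with this name already exists.
        await container.create_container(metadata={"project": "demo"}, public_access="blob")
# -------------------------------------------------------------------------------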
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) - 
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) - response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) - response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """operation marks the specified container for deletion. The container and any blobs contained - within it are later deleted during garbage collection. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = 
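# --- Usage sketch (not part of the generated client) --------------------------
# get_properties above returns the container's system properties and metadata
# from response headers (no blob listing). Through the public client this is
# ContainerClient.get_container_properties; names below are placeholders.
import os

from azure.storage.blob.aio import ContainerClient


async def show_container_properties() -> None:
    container = ContainerClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="mycontainer"
    )
    async with container:
        props = await container.get_container_properties()
        # Header-backed fields such as Last-Modified, lease state and x-ms-meta-*.
        print(props.last_modified, props.lease.state, props.metadata)
# -------------------------------------------------------------------------------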
self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """operation sets one or more user-defined name-value pairs for the specified container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - restype = "container" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_access_policy( - self, - timeout: Optional[int] = None, - 
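# --- Usage sketch (not part of the generated client) --------------------------
# delete and set_metadata above map to delete_container and
# set_container_metadata on the public client. set_metadata replaces the whole
# metadata set; delete only marks the container for later garbage collection.
# Names are placeholders.
import os

from azure.storage.blob.aio import ContainerClient


async def update_then_delete_container() -> None:
    container = ContainerClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="mycontainer"
    )
    async with container:
        await container.set_container_metadata({"owner": "team-a"})
        await container.delete_container()
# -------------------------------------------------------------------------------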
request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> List["_models.SignedIdentifier"]: - """gets the permissions for the specified container. The permissions indicate whether container - data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - async def set_access_policy( - self, - timeout: Optional[int] = None, - access: Optional[Union[str, "_models.PublicAccessType"]] = None, - request_id_parameter: Optional[str] = None, - container_acl: Optional[List["_models.SignedIdentifier"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """sets the permissions for the specified container. The permissions indicate whether blobs in a - container may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_acl: the acls for the container. - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - async def restore( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - deleted_container_name: Optional[str] = None, - deleted_container_version: Optional[str] = None, - **kwargs - ) -> None: - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of - the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the - version of the deleted container to restore. 
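# --- Usage sketch (not part of the generated client) --------------------------
# get_access_policy / set_access_policy above manage the container ACL: stored
# access policies plus the public-access level. A minimal sketch assuming the
# public client; the policy id, permissions and expiry are placeholders.
import datetime
import os

from azure.storage.blob import AccessPolicy, ContainerSasPermissions
from azure.storage.blob.aio import ContainerClient


async def configure_access_policy() -> None:
    container = ContainerClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="mycontainer"
    )
    async with container:
        policy = AccessPolicy(
            permission=ContainerSasPermissions(read=True, list=True),
            expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
        )
        await container.set_container_access_policy({"read-only": policy}, public_access=None)
        acl = await container.get_container_access_policy()
        print(acl["public_access"], [s.id for s in acl["signed_identifiers"]])
# -------------------------------------------------------------------------------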
- :type deleted_container_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{containerName}'} # type: ignore - - async def rename( - self, - source_container_name: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - source_lease_id: Optional[str] = None, - **kwargs - ) -> None: - """Renames an existing container. - - :param source_container_name: Required. Specifies the name of the container to rename. - :type source_container_name: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{containerName}'} # type: ignore - - async def submit_batch( - self, - content_length: int, - multipart_content_type: str, - body: IO, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> IO: - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. 
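# --- Usage sketch (not part of the generated client) --------------------------
# restore above (comp=undelete) surfaces as BlobServiceClient.undelete_container
# once container soft delete is enabled on the account; the deleted name and
# version come from list_containers(include_deleted=True). A sketch under those
# assumptions, with a placeholder connection string.
import os

from azure.storage.blob.aio import BlobServiceClient


async def restore_first_deleted_container() -> None:
    service = BlobServiceClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"]
    )
    async with service:
        async for item in service.list_containers(include_deleted=True):
            if item.deleted:
                await service.undelete_container(item.name, item.version)
                break
# -------------------------------------------------------------------------------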
- - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/{containerName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - 
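# --- Usage sketch (not part of the generated client) --------------------------
# submit_batch above packs several sub-requests into one multipart/mixed call.
# The public SDK drives it through helpers such as ContainerClient.delete_blobs;
# a minimal sketch with placeholder blob names.
import os

from azure.storage.blob.aio import ContainerClient


async def delete_blobs_in_one_batch() -> None:
    container = ContainerClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="mycontainer"
    )
    async with container:
        # All three deletions travel in a single batch request.
        await container.delete_blobs("a.txt", "b.txt", "c.txt")
# -------------------------------------------------------------------------------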
header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
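# --- Usage sketch (not part of the generated client) --------------------------
# acquire_lease / release_lease above implement container leasing. Through the
# public client, acquire_lease returns a BlobLeaseClient whose id must accompany
# subsequent writes against the leased container; names and the 15-second
# duration below are placeholders.
import os

from azure.storage.blob.aio import ContainerClient


async def lease_container_briefly() -> None:
    container = ContainerClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="mycontainer"
    )
    async with container:
        lease = await container.acquire_lease(lease_duration=15)  # 15-60 s, or -1 for infinite
        try:
            # A leased container rejects writes that omit the matching lease id.
            await container.set_container_metadata({"locked": "true"}, lease=lease)
        finally:
            await lease.release()
# -------------------------------------------------------------------------------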
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = 
{'url': '/{containerName}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - 
if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
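# --- Illustrative usage (editor's note) ---
# release_lease and renew_lease above correspond to BlobLeaseClient.release() and
# .renew() in the public SDK.  A hedged sketch; the client, credential, and lease ID
# below are placeholders.

from azure.storage.blob import BlobLeaseClient, ContainerClient

container = ContainerClient(
    "https://<account>.blob.core.windows.net", "my-container",
    credential="<account-key-or-sas>")  # placeholders

# Re-attach to an existing lease by its ID, then renew and finally release it.
lease = BlobLeaseClient(container, lease_id="<current-lease-id>")  # placeholder GUID
lease.renew()    # maps to the renew_lease operation above
lease.release()  # maps to release_lease; the container becomes leasable again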
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
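# --- Illustrative usage (editor's note) ---
# break_lease can be issued without knowing the current lease ID; lease_break_period
# in the public SDK maps to the x-ms-lease-break-period header built above.
# Placeholder client and credential.

from azure.storage.blob import ContainerClient

container = ContainerClient(
    "https://<account>.blob.core.windows.net", "my-container",
    credential="<account-key-or-sas>")  # placeholders

# Give the current holder up to 10 more seconds before the lease is broken; with no
# period, a fixed-duration lease runs out naturally and an infinite lease breaks now.
remaining = container.break_lease(lease_break_period=10)
print("seconds until the lease is free:", remaining)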
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - proposed_lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def list_blob_flat_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.ListBlobsFlatSegmentResponse": - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. 
- :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsFlatSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore - - async def list_blob_hierarchy_segment( - self, - delimiter: str, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.ListBlobsHierarchySegmentResponse": - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
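# --- Illustrative usage (editor's note) ---
# list_blob_flat_segment and list_blob_hierarchy_segment back the public
# list_blobs() and walk_blobs() iterators; the marker / NextMarker pair described
# in the docstrings above is exposed as the pager's continuation token.
# Placeholder client and prefix.

from azure.storage.blob import ContainerClient

container = ContainerClient(
    "https://<account>.blob.core.windows.net", "my-container",
    credential="<account-key-or-sas>")  # placeholders

# Flat listing, 100 results per page, resumable via the continuation token.
pages = container.list_blobs(name_starts_with="logs/", results_per_page=100).by_page()
first_page = list(next(pages))
token = pages.continuation_token  # resume later with by_page(continuation_token=token)

# Hierarchical listing: '/' is the delimiter, so virtual "folders" come back as
# BlobPrefix items instead of being expanded blob by blob.
for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
    print(item.name)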
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_account_info( - self, - **kwargs - ) 
-> None: - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_directory_operations.py deleted file mode 100644 index 338ff69..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_directory_operations.py +++ /dev/null @@ -1,739 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
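# --- Illustrative usage (editor's note) ---
# The get_account_info operation deleted just above surfaces as
# get_account_information() on the public clients and simply echoes the
# x-ms-sku-name and x-ms-account-kind response headers.  Placeholder client.

from azure.storage.blob import ContainerClient

container = ContainerClient(
    "https://<account>.blob.core.windows.net", "my-container",
    credential="<account-key-or-sas>")  # placeholders

info = container.get_account_information()
print(info["sku_name"], info["account_kind"])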
-# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Create a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. 
If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - resource = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - 
header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def rename( - self, - rename_source: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - source_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> 
None: - """Rename a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
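# --- Illustrative usage (editor's note) ---
# The DirectoryOperations.create/rename calls above are the blob-endpoint DataLake
# operations; in the upstream SDK the same functionality is reached through
# azure.storage.filedatalake (assumed here).  All names below are placeholders.

from azure.storage.filedatalake import DataLakeDirectoryClient

directory = DataLakeDirectoryClient(
    account_url="https://<account>.dfs.core.windows.net",  # placeholder
    file_system_name="my-filesystem",                       # placeholder
    directory_name="raw/events",                            # placeholder
    credential="<account-key-or-sas>",                      # placeholder
)

# POSIX-style permissions and umask only take effect on hierarchical-namespace
# accounts, matching the x-ms-permissions / x-ms-umask headers built above.
directory.create_directory(permissions="rwxr-x---", umask="0027")

# rename_source above expects "/{filesystem}/{path}"; the client-level call takes
# the new name as "{filesystem}/{new path}".
directory.rename_directory(new_name="my-filesystem/curated/events")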
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] 
= self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def delete( - self, - recursive_directory_delete: bool, - timeout: Optional[int] = None, - marker: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted. - If "false" and the directory is non-empty, an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_access_control( - self, - timeout: Optional[int] = None, - upn: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Get the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_page_blob_operations.py deleted file mode 100644 index da920b5..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_page_blob_operations.py +++ /dev/null @@ -1,1393 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class PageBlobOperations: - """PageBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - content_length: int, - blob_content_length: int, - timeout: Optional[int] = None, - tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, - metadata: Optional[str] = None, - blob_sequence_number: Optional[int] = 0, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "PageBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = 
self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def upload_pages( - self, - content_length: int, - body: IO, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Upload Pages operation writes a range of pages to a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "update" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - 
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def clear_pages( - self, - content_length: int, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "clear" - accept = "application/xml" - - # Construct URL - url = self.clear_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if 
_encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def upload_pages_from_url( - self, - source_url: str, - source_range: str, - content_length: int, - range: str, - source_content_md5: Optional[bytearray] = None, - 
source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Upload Pages operation writes a range of pages to a page blob where the contents are read - from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The length of this range - should match the ContentLength header and x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be written. The range should - be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "page" - page_write = "update" - accept = "application/xml" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - 
request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_page_ranges( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> "_models.PageList": - """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot - of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) 
- raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_page_ranges_diff( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - prevsnapshot: Optional[str] = None, - prev_snapshot_url: Optional[str] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> "_models.PageList": - """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that - were changed between target blob and previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a - DateTime value that specifies that the response will contain only pages that were changed - between target blob and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is - the older of the two. Note that incremental snapshots are currently supported only for blobs - created on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in service versions - 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The - response will only contain pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def resize( - self, - blob_content_length: int, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.resize.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
- header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def update_sequence_number( - self, - sequence_number_action: Union[str, "_models.SequenceNumberActionType"], - timeout: Optional[int] = None, - blob_sequence_number: Optional[int] = 0, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the - request. This property applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. - :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.update_sequence_number.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def copy_incremental( - self, - copy_source: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """The Copy Incremental operation copies a snapshot of the source page blob to a destination page - blob. The snapshot is copied such that only the differential changes between the previously - copied snapshot are transferred to the destination. The copied snapshots are complete copies of - the original snapshot and can be read or copied from as usual. This API is supported since REST - version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "incrementalcopy" - accept = "application/xml" - - # Construct URL - url = self.copy_incremental.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id'))
-        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
-        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
-        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
-        response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id'))
-        response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status'))
-
-        if cls:
-            return cls(pipeline_response, None, response_headers)
-
-    copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_service_operations.py
deleted file mode 100644
index 91a0646..0000000
--- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_service_operations.py
+++ /dev/null
@@ -1,691 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for license information.
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is regenerated.
-# --------------------------------------------------------------------------
-from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union
-import warnings
-
-from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
-from azure.core.pipeline import PipelineResponse
-from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
-
-from ... import models as _models
-
-T = TypeVar('T')
-ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
-
-class ServiceOperations:
-    """ServiceOperations async operations.
-
-    You should not instantiate this class directly. Instead, you should create a Client instance that
-    instantiates it for you and attaches it as an attribute.
-
-    :ivar models: Alias to model classes used in this operation group.
-    :type models: ~azure.storage.blob.models
-    :param client: Client for service requests.
-    :param config: Configuration of service client.
-    :param serializer: An object model serializer.
-    :param deserializer: An object model deserializer.
-    """
-
-    models = _models
-
-    def __init__(self, client, config, serializer, deserializer) -> None:
-        self._client = client
-        self._serialize = serializer
-        self._deserialize = deserializer
-        self._config = config
-
-    async def set_properties(
-        self,
-        storage_service_properties: "_models.StorageServiceProperties",
-        timeout: Optional[int] = None,
-        request_id_parameter: Optional[str] = None,
-        **kwargs
-    ) -> None:
-        """Sets properties for a storage account's Blob service endpoint, including properties for Storage
-        Analytics and CORS (Cross-Origin Resource Sharing) rules.
-
-        :param storage_service_properties: The StorageService properties.
-        :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties
-        :param timeout: The timeout parameter is expressed in seconds. For more information, see
-        :code:`Setting Timeouts for Blob Service Operations.`.
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.StorageServiceProperties": - """gets the properties of a storage account's Blob service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - async def get_statistics( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.StorageServiceStats": - """Retrieves statistics related to replication for the Blob service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('StorageServiceStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/'} # type: ignore - - async def list_containers_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.ListContainersSegmentResponse": - """The List Containers Segment operation returns a list of the containers under the specified - account. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. 
- :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's metadata be returned as - part of the response body. - :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_containers_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", 
request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_containers_segment.metadata = {'url': '/'} # type: ignore - - async def get_user_delegation_key( - self, - key_info: "_models.KeyInfo", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.UserDelegationKey": - """Retrieves a user delegation key for the Blob service. This is only a valid operation when using - bearer token authentication. - - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey, or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "userdelegationkey" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('UserDelegationKey', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} # type: ignore - - async def get_account_info( - self, - **kwargs - ) -> None: - """Returns the sku name and account kind. 
- - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/'} # type: ignore - - async def submit_batch( - self, - content_length: int, - multipart_content_type: str, - body: IO, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> IO: - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/'} # type: ignore - - async def filter_blobs( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - where: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - **kwargs - ) -> "_models.FilterBlobSegment": - """The Filter Blobs operation enables callers to list blobs across 
- all containers whose tags match
- a given search expression. Filter blobs searches across all containers within a storage
- account but can be scoped within the expression to a single container.
-
- :param timeout: The timeout parameter is expressed in seconds. For more information, see
- :code:`Setting Timeouts for Blob Service Operations.`.
- :type timeout: int
- :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
- limit that is recorded in the analytics logs when storage analytics logging is enabled.
- :type request_id_parameter: str
- :param where: Filters the results to return only blobs whose tags match the
- specified expression.
- :type where: str
- :param marker: A string value that identifies the portion of the list of containers to be
- returned with the next listing operation. The operation returns the NextMarker value within the
- response body if the listing operation did not return all containers remaining to be listed
- with the current page. The NextMarker value can be used as the value for the marker parameter
- in a subsequent call to request the next page of list items. The marker value is opaque to the
- client.
- :type marker: str
- :param maxresults: Specifies the maximum number of containers to return. If the request does
- not specify maxresults, or specifies a value greater than 5000, the server will return up to
- 5000 items. Note that if the listing operation crosses a partition boundary, then the service
- will return a continuation token for retrieving the remainder of the results. For this reason,
- it is possible that the service will return fewer results than specified by maxresults, or than
- the default of 5000.
- :type maxresults: int
- :keyword callable cls: A custom type or function that will be passed the direct response
- :return: FilterBlobSegment, or the result of cls(response)
- :rtype: ~azure.storage.blob.models.FilterBlobSegment
- :raises: ~azure.core.exceptions.HttpResponseError
- """
- cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"]
- error_map = {
- 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
- }
- error_map.update(kwargs.pop('error_map', {}))
- comp = "blobs"
- accept = "application/xml"
-
- # Construct URL
- url = self.filter_blobs.metadata['url'] # type: ignore
- path_format_arguments = {
- 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
- }
- url = self._client.format_url(url, **path_format_arguments)
-
- # Construct parameters
- query_parameters = {} # type: Dict[str, Any]
- query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
- if timeout is not None:
- query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
- if where is not None:
- query_parameters['where'] = self._serialize.query("where", where, 'str')
- if marker is not None:
- query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
- if maxresults is not None:
- query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
-
- # Construct headers
- header_parameters = {} # type: Dict[str, Any]
- header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
- if request_id_parameter is not None:
- header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
- header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/__init__.py deleted file mode 100644 index 3d33d25..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/__init__.py +++ /dev/null @@ -1,225 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import AppendPositionAccessConditions - from ._models_py3 import ArrowConfiguration - from ._models_py3 import ArrowField - from ._models_py3 import BlobFlatListSegment - from ._models_py3 import BlobHTTPHeaders - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobMetadata - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import BlobTag - from ._models_py3 import BlobTags - from ._models_py3 import Block - from ._models_py3 import BlockList - from ._models_py3 import BlockLookupList - from ._models_py3 import ClearRange - from ._models_py3 import ContainerCpkScopeInfo - from ._models_py3 import ContainerItem - from ._models_py3 import ContainerProperties - from ._models_py3 import CorsRule - from ._models_py3 import CpkInfo - from ._models_py3 import CpkScopeInfo - from ._models_py3 import DataLakeStorageError - from ._models_py3 import DataLakeStorageErrorAutoGenerated - from ._models_py3 import DelimitedTextConfiguration - from ._models_py3 import DirectoryHttpHeaders - from ._models_py3 import FilterBlobItem - from ._models_py3 import FilterBlobSegment - from ._models_py3 import GeoReplication - from ._models_py3 import JsonTextConfiguration - from ._models_py3 import KeyInfo - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsFlatSegmentResponse - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ListContainersSegmentResponse - from ._models_py3 import Logging - from ._models_py3 import Metrics - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import PageList - from ._models_py3 import PageRange - from ._models_py3 import QueryFormat - from ._models_py3 import QueryRequest - from ._models_py3 import QuerySerialization - from ._models_py3 import RetentionPolicy - from ._models_py3 import SequenceNumberAccessConditions - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StaticWebsite - from ._models_py3 import StorageError - from ._models_py3 import StorageServiceProperties - from ._models_py3 import StorageServiceStats - from ._models_py3 import UserDelegationKey -except (SyntaxError, ImportError): - from ._models import AccessPolicy # type: ignore - from ._models import AppendPositionAccessConditions # type: ignore - from ._models import ArrowConfiguration # type: ignore - from ._models import ArrowField # type: ignore - from ._models import BlobFlatListSegment # type: ignore - from ._models import BlobHTTPHeaders # type: ignore - from ._models import BlobHierarchyListSegment # type: ignore - from ._models import BlobItemInternal # type: ignore - from ._models import BlobMetadata # type: ignore - from ._models import BlobPrefix # type: ignore - from ._models import BlobPropertiesInternal # type: ignore - from ._models import BlobTag # type: ignore - from ._models import BlobTags # type: ignore - from ._models import Block # type: ignore - from ._models import BlockList # type: ignore - from ._models import BlockLookupList # type: ignore - from ._models import ClearRange # type: ignore - from ._models import ContainerCpkScopeInfo # type: ignore - from ._models import ContainerItem # type: ignore - from ._models import ContainerProperties # type: 
ignore - from ._models import CorsRule # type: ignore - from ._models import CpkInfo # type: ignore - from ._models import CpkScopeInfo # type: ignore - from ._models import DataLakeStorageError # type: ignore - from ._models import DataLakeStorageErrorAutoGenerated # type: ignore - from ._models import DelimitedTextConfiguration # type: ignore - from ._models import DirectoryHttpHeaders # type: ignore - from ._models import FilterBlobItem # type: ignore - from ._models import FilterBlobSegment # type: ignore - from ._models import GeoReplication # type: ignore - from ._models import JsonTextConfiguration # type: ignore - from ._models import KeyInfo # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListBlobsFlatSegmentResponse # type: ignore - from ._models import ListBlobsHierarchySegmentResponse # type: ignore - from ._models import ListContainersSegmentResponse # type: ignore - from ._models import Logging # type: ignore - from ._models import Metrics # type: ignore - from ._models import ModifiedAccessConditions # type: ignore - from ._models import PageList # type: ignore - from ._models import PageRange # type: ignore - from ._models import QueryFormat # type: ignore - from ._models import QueryRequest # type: ignore - from ._models import QuerySerialization # type: ignore - from ._models import RetentionPolicy # type: ignore - from ._models import SequenceNumberAccessConditions # type: ignore - from ._models import SignedIdentifier # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StaticWebsite # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageServiceProperties # type: ignore - from ._models import StorageServiceStats # type: ignore - from ._models import UserDelegationKey # type: ignore - -from ._azure_blob_storage_enums import ( - AccessTier, - AccessTierOptional, - AccessTierRequired, - AccountKind, - ArchiveStatus, - BlobExpiryOptions, - BlobType, - BlockListType, - CopyStatusType, - DeleteSnapshotsOptionType, - EncryptionAlgorithmType, - GeoReplicationStatusType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListBlobsIncludeItem, - ListContainersIncludeType, - PathRenameMode, - PremiumPageBlobAccessTier, - PublicAccessType, - QueryFormatType, - RehydratePriority, - SequenceNumberActionType, - SkuName, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'AppendPositionAccessConditions', - 'ArrowConfiguration', - 'ArrowField', - 'BlobFlatListSegment', - 'BlobHTTPHeaders', - 'BlobHierarchyListSegment', - 'BlobItemInternal', - 'BlobMetadata', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'BlobTag', - 'BlobTags', - 'Block', - 'BlockList', - 'BlockLookupList', - 'ClearRange', - 'ContainerCpkScopeInfo', - 'ContainerItem', - 'ContainerProperties', - 'CorsRule', - 'CpkInfo', - 'CpkScopeInfo', - 'DataLakeStorageError', - 'DataLakeStorageErrorAutoGenerated', - 'DelimitedTextConfiguration', - 'DirectoryHttpHeaders', - 'FilterBlobItem', - 'FilterBlobSegment', - 'GeoReplication', - 'JsonTextConfiguration', - 'KeyInfo', - 'LeaseAccessConditions', - 'ListBlobsFlatSegmentResponse', - 'ListBlobsHierarchySegmentResponse', - 'ListContainersSegmentResponse', - 'Logging', - 'Metrics', - 'ModifiedAccessConditions', - 'PageList', - 'PageRange', - 'QueryFormat', - 'QueryRequest', - 'QuerySerialization', - 'RetentionPolicy', - 'SequenceNumberAccessConditions', - 'SignedIdentifier', - 'SourceModifiedAccessConditions', - 
'StaticWebsite', - 'StorageError', - 'StorageServiceProperties', - 'StorageServiceStats', - 'UserDelegationKey', - 'AccessTier', - 'AccessTierOptional', - 'AccessTierRequired', - 'AccountKind', - 'ArchiveStatus', - 'BlobExpiryOptions', - 'BlobType', - 'BlockListType', - 'CopyStatusType', - 'DeleteSnapshotsOptionType', - 'EncryptionAlgorithmType', - 'GeoReplicationStatusType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'ListBlobsIncludeItem', - 'ListContainersIncludeType', - 'PathRenameMode', - 'PremiumPageBlobAccessTier', - 'PublicAccessType', - 'QueryFormatType', - 'RehydratePriority', - 'SequenceNumberActionType', - 'SkuName', - 'StorageErrorCode', -] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_azure_blob_storage_enums.py deleted file mode 100644 index 5d03a10..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_azure_blob_storage_enums.py +++ /dev/null @@ -1,339 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. 
- """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccessTierOptional(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccessTierRequired(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccountKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - STORAGE = "Storage" - BLOB_STORAGE = "BlobStorage" - STORAGE_V2 = "StorageV2" - FILE_STORAGE = "FileStorage" - BLOCK_BLOB_STORAGE = "BlockBlobStorage" - -class ArchiveStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot" - REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool" - -class BlobExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NEVER_EXPIRE = "NeverExpire" - RELATIVE_TO_CREATION = "RelativeToCreation" - RELATIVE_TO_NOW = "RelativeToNow" - ABSOLUTE = "Absolute" - -class BlobType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - BLOCK_BLOB = "BlockBlob" - PAGE_BLOB = "PageBlob" - APPEND_BLOB = "AppendBlob" - -class BlockListType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COMMITTED = "committed" - UNCOMMITTED = "uncommitted" - ALL = "all" - -class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - PENDING = "pending" - SUCCESS = "success" - ABORTED = "aborted" - FAILED = "failed" - -class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INCLUDE = "include" - ONLY = "only" - -class EncryptionAlgorithmType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NONE = "None" - AES256 = "AES256" - -class GeoReplicationStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the secondary location - """ - - LIVE = "live" - BOOTSTRAP = "bootstrap" - UNAVAILABLE = "unavailable" - -class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INFINITE = "infinite" - FIXED = "fixed" - -class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - AVAILABLE = "available" - LEASED = "leased" - EXPIRED = "expired" - BREAKING = "breaking" - BROKEN = "broken" - -class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LOCKED = "locked" - UNLOCKED = "unlocked" - -class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COPY = "copy" - DELETED = "deleted" - METADATA = "metadata" - SNAPSHOTS = "snapshots" - UNCOMMITTEDBLOBS = "uncommittedblobs" - VERSIONS = "versions" - TAGS = "tags" - -class ListContainersIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - METADATA = "metadata" - DELETED = "deleted" - -class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LEGACY = "legacy" - POSIX = "posix" - -class 
PremiumPageBlobAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - -class PublicAccessType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - CONTAINER = "container" - BLOB = "blob" - -class QueryFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The quick query format type. - """ - - DELIMITED = "delimited" - JSON = "json" - ARROW = "arrow" - -class RehydratePriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """If an object is in rehydrate pending state then this header is returned with priority of - rehydrate. Valid values are High and Standard. - """ - - HIGH = "High" - STANDARD = "Standard" - -class SequenceNumberActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - MAX = "max" - UPDATE = "update" - INCREMENT = "increment" - -class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - STANDARD_LRS = "Standard_LRS" - STANDARD_GRS = "Standard_GRS" - STANDARD_RAGRS = "Standard_RAGRS" - STANDARD_ZRS = "Standard_ZRS" - PREMIUM_LRS = "Premium_LRS" - -class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Error codes returned by the service - """ - - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = "InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = "OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" - BLOB_ALREADY_EXISTS = "BlobAlreadyExists" - BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy" - BLOB_NOT_FOUND = 
"BlobNotFound" - BLOB_OVERWRITTEN = "BlobOverwritten" - BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" - BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" - BLOCK_LIST_TOO_LONG = "BlockListTooLong" - CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" - CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" - CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" - CONTAINER_BEING_DELETED = "ContainerBeingDeleted" - CONTAINER_DISABLED = "ContainerDisabled" - CONTAINER_NOT_FOUND = "ContainerNotFound" - CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" - COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" - COPY_ID_MISMATCH = "CopyIdMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" - INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" - INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" - INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" - INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" - INVALID_BLOB_TIER = "InvalidBlobTier" - INVALID_BLOB_TYPE = "InvalidBlobType" - INVALID_BLOCK_ID = "InvalidBlockId" - INVALID_BLOCK_LIST = "InvalidBlockList" - INVALID_OPERATION = "InvalidOperation" - INVALID_PAGE_RANGE = "InvalidPageRange" - INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" - INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" - INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" - LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" - LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" - LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" - LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" - LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" - LEASE_ID_MISSING = "LeaseIdMissing" - LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" - LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" - LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" - LEASE_LOST = "LeaseLost" - LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" - LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" - LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" - MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" - NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" - NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" - OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" - PENDING_COPY_OPERATION = "PendingCopyOperation" - PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" - PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" - PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" - SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" - SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" - SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" - SNAPHOT_OPERATION_RATE_EXCEEDED = "SnaphotOperationRateExceeded" - SNAPSHOTS_PRESENT = "SnapshotsPresent" - SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" - SYSTEM_IN_USE = "SystemInUse" - TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" - UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" - BLOB_BEING_REHYDRATED = 
"BlobBeingRehydrated" - BLOB_ARCHIVED = "BlobArchived" - BLOB_NOT_ARCHIVED = "BlobNotArchived" - AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" - AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" - AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" - AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" - AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models.py deleted file mode 100644 index fadcdd5..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models.py +++ /dev/null @@ -1,2031 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: the date-time the policy is active. - :type start: str - :param expiry: the date-time the policy expires. - :type expiry: str - :param permission: the permissions for the acl policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class AppendPositionAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param max_size: Optional conditional header. The max length in bytes permitted for the append - blob. If the Append Block operation would cause the blob to exceed that limit or if the blob - size is already greater than the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will succeed only if the append - position is equal to this number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': 'maxSize', 'type': 'long'}, - 'append_position': {'key': 'appendPosition', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = kwargs.get('max_size', None) - self.append_position = kwargs.get('append_position', None) - - -class ArrowConfiguration(msrest.serialization.Model): - """arrow configuration. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. 
- :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = kwargs['schema'] - - -class ArrowField(msrest.serialization.Model): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. - :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str'}, - 'name': {'key': 'Name', 'type': 'str'}, - 'precision': {'key': 'Precision', 'type': 'int'}, - 'scale': {'key': 'Scale', 'type': 'int'}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__( - self, - **kwargs - ): - super(ArrowField, self).__init__(**kwargs) - self.type = kwargs['type'] - self.name = kwargs.get('name', None) - self.precision = kwargs.get('precision', None) - self.scale = kwargs.get('scale', None) - - -class BlobFlatListSegment(msrest.serialization.Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = kwargs['blob_items'] - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs['blob_items'] - - -class BlobHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property - is stored with the blob and returned with a read request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If specified, this property - is stored with the blob and returned with a read request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not - validated, as the hashes for the individual blocks were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. 
Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, - 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, - 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, - 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, - 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, - 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = kwargs.get('blob_cache_control', None) - self.blob_content_type = kwargs.get('blob_content_type', None) - self.blob_content_md5 = kwargs.get('blob_content_md5', None) - self.blob_content_encoding = kwargs.get('blob_content_encoding', None) - self.blob_content_language = kwargs.get('blob_content_language', None) - self.blob_content_disposition = kwargs.get('blob_content_disposition', None) - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. - :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: Blob tags. - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: Dictionary of :code:``. 
- :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs['deleted'] - self.snapshot = kwargs['snapshot'] - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - self.blob_tags = kwargs.get('blob_tags', None) - self.object_replication_metadata = kwargs.get('object_replication_metadata', None) - - -class BlobMetadata(msrest.serialization.Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}'}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__( - self, - **kwargs - ): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = kwargs.get('additional_properties', None) - self.encrypted = kwargs.get('encrypted', None) - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs['name'] - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: "locked", "unlocked". 
- :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: "pending", "success", "aborted", "failed". - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", - "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: "rehydrate-pending-to-hot", "rehydrate- - pending-to-cool". - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: If an object is in rehydrate pending state then this header is - returned with priority of rehydrate. Valid values are High and Standard. Possible values - include: "High", "Standard". 
- :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'blob_type': {'key': 'BlobType', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.blob_type = kwargs.get('blob_type', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = 
kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_status = kwargs.get('copy_status', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.archive_status = kwargs.get('archive_status', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.rehydrate_priority = kwargs.get('rehydrate_priority', None) - self.last_accessed_on = kwargs.get('last_accessed_on', None) - - -class BlobTag(msrest.serialization.Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. - :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__( - self, - **kwargs - ): - super(BlobTag, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] - - -class BlobTags(msrest.serialization.Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__( - self, - **kwargs - ): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = kwargs['blob_tag_set'] - - -class Block(msrest.serialization.Model): - """Represents a single block in a block blob. It describes the block's ID and size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'size': {'key': 'Size', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(Block, self).__init__(**kwargs) - self.name = kwargs['name'] - self.size = kwargs['size'] - - -class BlockList(msrest.serialization.Model): - """BlockList. 
- - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - } - - def __init__( - self, - **kwargs - ): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = kwargs.get('committed_blocks', None) - self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) - - -class BlockLookupList(msrest.serialization.Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__( - self, - **kwargs - ): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = kwargs.get('committed', None) - self.uncommitted = kwargs.get('uncommitted', None) - self.latest = kwargs.get('latest', None) - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class ContainerCpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the - default encryption scope to set on the container and use for all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, - prevents any request from specifying a different encryption scope than the scope set on the - container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - - -class ContainerItem(msrest.serialization.Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a container. 
- :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__( - self, - **kwargs - ): - super(ContainerItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - - -class ContainerProperties(msrest.serialization.Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: "container", "blob". - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'public_access': {'key': 'PublicAccess', 'type': 'str'}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.public_access = kwargs.get('public_access', None) - self.has_immutability_policy = 
kwargs.get('has_immutability_policy', None)
- self.has_legal_hold = kwargs.get('has_legal_hold', None)
- self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
- self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
- self.deleted_time = kwargs.get('deleted_time', None)
- self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
-
-
-class CorsRule(msrest.serialization.Model):
- """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
-
- All required parameters must be populated in order to send to Azure.
-
- :param allowed_origins: Required. The origin domains that are permitted to make a request
- against the storage service via CORS. The origin domain is the domain from which the request
- originates. Note that the origin must be an exact case-sensitive match with the origin that the
- user agent sends to the service. You can also use the wildcard character '*' to allow all origin
- domains to make requests via CORS.
- :type allowed_origins: str
- :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
- use for a CORS request. (comma separated).
- :type allowed_methods: str
- :param allowed_headers: Required. The request headers that the origin domain may specify on the
- CORS request.
- :type allowed_headers: str
- :param exposed_headers: Required. The response headers that may be sent in the response to the
- CORS request and exposed by the browser to the request issuer.
- :type exposed_headers: str
- :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the
- preflight OPTIONS request.
- :type max_age_in_seconds: int
- """
-
- _validation = {
- 'allowed_origins': {'required': True},
- 'allowed_methods': {'required': True},
- 'allowed_headers': {'required': True},
- 'exposed_headers': {'required': True},
- 'max_age_in_seconds': {'required': True, 'minimum': 0},
- }
-
- _attribute_map = {
- 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
- 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
- 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
- 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
- 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
- }
-
- def __init__(
- self,
- **kwargs
- ):
- super(CorsRule, self).__init__(**kwargs)
- self.allowed_origins = kwargs['allowed_origins']
- self.allowed_methods = kwargs['allowed_methods']
- self.allowed_headers = kwargs['allowed_headers']
- self.exposed_headers = kwargs['exposed_headers']
- self.max_age_in_seconds = kwargs['max_age_in_seconds']
-
-
-class CpkInfo(msrest.serialization.Model):
- """Parameter group.
-
- :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data
- provided in the request. If not specified, encryption is performed with the root account
- encryption key. For more information, see Encryption at Rest for Azure Storage Services.
- :type encryption_key: str
- :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
- if the x-ms-encryption-key header is provided. 
- :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. Possible values include: "None", "AES256". - :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = kwargs.get('encryption_key', None) - self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) - self.encryption_algorithm = kwargs.get('encryption_algorithm', None) - - -class CpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the - encryption scope to use to encrypt the data provided in the request. If not specified, - encryption is performed with the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = kwargs.get('encryption_scope', None) - - -class DataLakeStorageError(msrest.serialization.Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: - ~azure.storage.blob.models.DataLakeStorageErrorAutoGenerated - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorAutoGenerated'}, - } - - def __init__( - self, - **kwargs - ): - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None) - - -class DataLakeStorageErrorAutoGenerated(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DataLakeStorageErrorAutoGenerated, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - - -class DelimitedTextConfiguration(msrest.serialization.Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator. - :type column_separator: str - :param field_quote: Required. field quote. - :type field_quote: str - :param record_separator: Required. record separator. - :type record_separator: str - :param escape_char: Required. escape char. - :type escape_char: str - :param headers_present: Required. has headers. 
- :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = kwargs['column_separator'] - self.field_quote = kwargs['field_quote'] - self.record_separator = kwargs['record_separator'] - self.escape_char = kwargs['escape_char'] - self.headers_present = kwargs['headers_present'] - - -class DirectoryHttpHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Cache control for given resource. - :type cache_control: str - :param content_type: Content type for given resource. - :type content_type: str - :param content_encoding: Content encoding for given resource. - :type content_encoding: str - :param content_language: Content language for given resource. - :type content_language: str - :param content_disposition: Content disposition for given resource. - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - - -class FilterBlobItem(msrest.serialization.Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tags: A set of tags. Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'container_name': {'key': 'ContainerName', 'type': 'str'}, - 'tags': {'key': 'Tags', 'type': 'BlobTags'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.container_name = kwargs['container_name'] - self.tags = kwargs.get('tags', None) - - -class FilterBlobSegment(msrest.serialization.Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. 
- - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'where': {'key': 'Where', 'type': 'str'}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.where = kwargs['where'] - self.blobs = kwargs['blobs'] - self.next_marker = kwargs.get('next_marker', None) - - -class GeoReplication(msrest.serialization.Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible values include: "live", - "bootstrap", "unavailable". - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes - preceding this value are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for reads. - :type last_sync_time: ~datetime.datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str'}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, - } - - def __init__( - self, - **kwargs - ): - super(GeoReplication, self).__init__(**kwargs) - self.status = kwargs['status'] - self.last_sync_time = kwargs['last_sync_time'] - - -class JsonTextConfiguration(msrest.serialization.Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. record separator. - :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = kwargs['record_separator'] - - -class KeyInfo(msrest.serialization.Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC time. - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. 
- :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(KeyInfo, self).__init__(**kwargs) - self.start = kwargs['start'] - self.expiry = kwargs['expiry'] - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsFlatSegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ListContainersSegmentResponse(msrest.serialization.Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.container_items = kwargs['container_items'] - self.next_marker = kwargs.get('next_marker', None) - - -class Logging(msrest.serialization.Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. Indicates whether all delete requests should be logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be logged. - :type write: bool - :param retention_policy: Required. 
the retention policy which determines how long the - associated data should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'delete': {'key': 'Delete', 'type': 'bool'}, - 'read': {'key': 'Read', 'type': 'bool'}, - 'write': {'key': 'Write', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Logging, self).__init__(**kwargs) - self.version = kwargs['version'] - self.delete = kwargs['delete'] - self.read = kwargs['read'] - self.write = kwargs['write'] - self.retention_policy = kwargs['retention_policy'] - - -class Metrics(msrest.serialization.Model): - """a summary of request statistics grouped by API in hour or minute aggregates for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: the retention policy which determines how long the associated data - should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs['enabled'] - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. 
- :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - 'if_tags': {'key': 'ifTags', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_tags = kwargs.get('if_tags', None) - - -class PageList(msrest.serialization.Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, - } - - def __init__( - self, - **kwargs - ): - super(PageList, self).__init__(**kwargs) - self.page_range = kwargs.get('page_range', None) - self.clear_range = kwargs.get('clear_range', None) - - -class PageRange(msrest.serialization.Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__( - self, - **kwargs - ): - super(PageRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class QueryFormat(msrest.serialization.Model): - """QueryFormat. - - :param type: The quick query format type. Possible values include: "delimited", "json", - "arrow". - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: delimited text configuration. - :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: json text configuration. - :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: arrow configuration. - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, - } - - def __init__( - self, - **kwargs - ): - super(QueryFormat, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) - self.json_text_configuration = kwargs.get('json_text_configuration', None) - self.arrow_configuration = kwargs.get('arrow_configuration', None) - - -class QueryRequest(msrest.serialization.Model): - """the quick query body. 
- - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL". - :vartype query_type: str - :param expression: Required. a query statement. - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__( - self, - **kwargs - ): - super(QueryRequest, self).__init__(**kwargs) - self.expression = kwargs['expression'] - self.input_serialization = kwargs.get('input_serialization', None) - self.output_serialization = kwargs.get('output_serialization', None) - - -class QuerySerialization(msrest.serialization.Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. - :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat'}, - } - - def __init__( - self, - **kwargs - ): - super(QuerySerialization, self).__init__(**kwargs) - self.format = kwargs['format'] - - -class RetentionPolicy(msrest.serialization.Model): - """the retention policy which determines how long the associated data should persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the storage - service. - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :type days: int - :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage - account. - :type allow_permanent_delete: bool - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.days = kwargs.get('days', None) - self.allow_permanent_delete = kwargs.get('allow_permanent_delete', None) - - -class SequenceNumberAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a - blob if it has a sequence number less than or equal to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it - has a sequence number less than the specified. 
- :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it - has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, - 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, - 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None) - self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None) - self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None) - - -class SignedIdentifier(msrest.serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id. - :type id: str - :param access_policy: An Access policy. - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__( - self, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs['id'] - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_tags = kwargs.get('source_if_tags', None) - - -class StaticWebsite(msrest.serialization.Model): - """The properties that enable an account to host a static website. 
- - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a static website. - :type enabled: bool - :param index_document: The default name of the index page under each directory. - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page. - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index page. - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'index_document': {'key': 'IndexDocument', 'type': 'str'}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.index_document = kwargs.get('index_document', None) - self.error_document404_path = kwargs.get('error_document404_path', None) - self.default_index_document_path = kwargs.get('default_index_document_path', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage Service Properties. - - :param logging: Azure Analytics Logging settings. - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to the Blob service if - an incoming request's version is not specified. Possible values include version 2008-10-27 and - all more recent versions. - :type default_service_version: str - :param delete_retention_policy: the retention policy which determines how long the associated - data should persist. - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: The properties that enable an account to host a static website. 
- :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging'}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = kwargs.get('logging', None) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.default_service_version = kwargs.get('default_service_version', None) - self.delete_retention_policy = kwargs.get('delete_retention_policy', None) - self.static_website = kwargs.get('static_website', None) - - -class StorageServiceStats(msrest.serialization.Model): - """Stats for the storage service. - - :param geo_replication: Geo-Replication information for the Secondary Storage Service. - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = kwargs.get('geo_replication', None) - - -class UserDelegationKey(msrest.serialization.Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. - :type signed_tid: str - :param signed_start: Required. The date-time the key is active. - :type signed_start: ~datetime.datetime - :param signed_expiry: Required. The date-time the key expires. - :type signed_expiry: ~datetime.datetime - :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the - key. - :type signed_service: str - :param signed_version: Required. The service version that created the key. - :type signed_version: str - :param value: Required. The key as a base64 string. 
- :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, - 'signed_service': {'key': 'SignedService', 'type': 'str'}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = kwargs['signed_oid'] - self.signed_tid = kwargs['signed_tid'] - self.signed_start = kwargs['signed_start'] - self.signed_expiry = kwargs['signed_expiry'] - self.signed_service = kwargs['signed_service'] - self.signed_version = kwargs['signed_version'] - self.value = kwargs['value'] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models_py3.py deleted file mode 100644 index 2ed0d23..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models_py3.py +++ /dev/null @@ -1,2303 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Dict, List, Optional, Union - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - -from ._azure_blob_storage_enums import * - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: the date-time the policy is active. - :type start: str - :param expiry: the date-time the policy expires. - :type expiry: str - :param permission: the permissions for the acl policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - *, - start: Optional[str] = None, - expiry: Optional[str] = None, - permission: Optional[str] = None, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class AppendPositionAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param max_size: Optional conditional header. The max length in bytes permitted for the append - blob. If the Append Block operation would cause the blob to exceed that limit or if the blob - size is already greater than the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. 
Append Block will succeed only if the append - position is equal to this number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': 'maxSize', 'type': 'long'}, - 'append_position': {'key': 'appendPosition', 'type': 'long'}, - } - - def __init__( - self, - *, - max_size: Optional[int] = None, - append_position: Optional[int] = None, - **kwargs - ): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = max_size - self.append_position = append_position - - -class ArrowConfiguration(msrest.serialization.Model): - """arrow configuration. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. - :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__( - self, - *, - schema: List["ArrowField"], - **kwargs - ): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = schema - - -class ArrowField(msrest.serialization.Model): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. - :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str'}, - 'name': {'key': 'Name', 'type': 'str'}, - 'precision': {'key': 'Precision', 'type': 'int'}, - 'scale': {'key': 'Scale', 'type': 'int'}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__( - self, - *, - type: str, - name: Optional[str] = None, - precision: Optional[int] = None, - scale: Optional[int] = None, - **kwargs - ): - super(ArrowField, self).__init__(**kwargs) - self.type = type - self.name = name - self.precision = precision - self.scale = scale - - -class BlobFlatListSegment(msrest.serialization.Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - **kwargs - ): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = blob_items - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. 
- :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - blob_prefixes: Optional[List["BlobPrefix"]] = None, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property - is stored with the blob and returned with a read request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If specified, this property - is stored with the blob and returned with a read request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not - validated, as the hashes for the individual blocks were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, - 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, - 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, - 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, - 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, - 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - blob_cache_control: Optional[str] = None, - blob_content_type: Optional[str] = None, - blob_content_md5: Optional[bytearray] = None, - blob_content_encoding: Optional[str] = None, - blob_content_language: Optional[str] = None, - blob_content_disposition: Optional[str] = None, - **kwargs - ): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = blob_cache_control - self.blob_content_type = blob_content_type - self.blob_content_md5 = blob_content_md5 - self.blob_content_encoding = blob_content_encoding - self.blob_content_language = blob_content_language - self.blob_content_disposition = blob_content_disposition - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. 
- :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: Blob tags. - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: Dictionary of :code:``. - :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - deleted: bool, - snapshot: str, - properties: "BlobPropertiesInternal", - version_id: Optional[str] = None, - is_current_version: Optional[bool] = None, - metadata: Optional["BlobMetadata"] = None, - blob_tags: Optional["BlobTags"] = None, - object_replication_metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.metadata = metadata - self.blob_tags = blob_tags - self.object_replication_metadata = object_replication_metadata - - -class BlobMetadata(msrest.serialization.Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}'}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, str]] = None, - encrypted: Optional[str] = None, - **kwargs - ): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.encrypted = encrypted - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. 
- :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: "pending", "success", "aborted", "failed". - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", - "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: "rehydrate-pending-to-hot", "rehydrate- - pending-to-cool". - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: If an object is in rehydrate pending state then this header is - returned with priority of rehydrate. Valid values are High and Standard. Possible values - include: "High", "Standard". 
- :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'blob_type': {'key': 'BlobType', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - creation_time: Optional[datetime.datetime] = None, - content_length: Optional[int] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_md5: Optional[bytearray] = None, - content_disposition: Optional[str] = None, - cache_control: Optional[str] = None, - blob_sequence_number: Optional[int] = None, - blob_type: Optional[Union[str, "BlobType"]] = None, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - copy_id: Optional[str] = None, - copy_status: Optional[Union[str, "CopyStatusType"]] = None, - copy_source: Optional[str] = None, - copy_progress: 
Optional[str] = None, - copy_completion_time: Optional[datetime.datetime] = None, - copy_status_description: Optional[str] = None, - server_encrypted: Optional[bool] = None, - incremental_copy: Optional[bool] = None, - destination_snapshot: Optional[str] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier: Optional[Union[str, "AccessTier"]] = None, - access_tier_inferred: Optional[bool] = None, - archive_status: Optional[Union[str, "ArchiveStatus"]] = None, - customer_provided_key_sha256: Optional[str] = None, - encryption_scope: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - tag_count: Optional[int] = None, - expires_on: Optional[datetime.datetime] = None, - is_sealed: Optional[bool] = None, - rehydrate_priority: Optional[Union[str, "RehydratePriority"]] = None, - last_accessed_on: Optional[datetime.datetime] = None, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.blob_type = blob_type - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.copy_id = copy_id - self.copy_status = copy_status - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_inferred = access_tier_inferred - self.archive_status = archive_status - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.rehydrate_priority = rehydrate_priority - self.last_accessed_on = last_accessed_on - - -class BlobTag(msrest.serialization.Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. - :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__( - self, - *, - key: str, - value: str, - **kwargs - ): - super(BlobTag, self).__init__(**kwargs) - self.key = key - self.value = value - - -class BlobTags(msrest.serialization.Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. 
- :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__( - self, - *, - blob_tag_set: List["BlobTag"], - **kwargs - ): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = blob_tag_set - - -class Block(msrest.serialization.Model): - """Represents a single block in a block blob. It describes the block's ID and size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: int - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'size': {'key': 'Size', 'type': 'int'}, - } - - def __init__( - self, - *, - name: str, - size: int, - **kwargs - ): - super(Block, self).__init__(**kwargs) - self.name = name - self.size = size - - -class BlockList(msrest.serialization.Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - } - - def __init__( - self, - *, - committed_blocks: Optional[List["Block"]] = None, - uncommitted_blocks: Optional[List["Block"]] = None, - **kwargs - ): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = committed_blocks - self.uncommitted_blocks = uncommitted_blocks - - -class BlockLookupList(msrest.serialization.Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__( - self, - *, - committed: Optional[List[str]] = None, - uncommitted: Optional[List[str]] = None, - latest: Optional[List[str]] = None, - **kwargs - ): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = committed - self.uncommitted = uncommitted - self.latest = latest - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class ContainerCpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the - default encryption scope to set on the container and use for all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, - prevents any request from specifying a different encryption scope than the scope set on the - container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, - } - - def __init__( - self, - *, - default_encryption_scope: Optional[str] = None, - prevent_encryption_scope_override: Optional[bool] = None, - **kwargs - ): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - - -class ContainerItem(msrest.serialization.Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a container. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__( - self, - *, - name: str, - properties: "ContainerProperties", - deleted: Optional[bool] = None, - version: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(ContainerItem, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class ContainerProperties(msrest.serialization.Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". 
- :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: "container", "blob". - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'public_access': {'key': 'PublicAccess', 'type': 'str'}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - public_access: Optional[Union[str, "PublicAccessType"]] = None, - has_immutability_policy: Optional[bool] = None, - has_legal_hold: Optional[bool] = None, - default_encryption_scope: Optional[str] = None, - prevent_encryption_scope_override: Optional[bool] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - **kwargs - ): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.public_access = public_access - self.has_immutability_policy = has_immutability_policy - self.has_legal_hold = has_legal_hold - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. 
Note that the origin must be an exact case-sensitive match with the origin that the - user agent sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - *, - allowed_origins: str, - allowed_methods: str, - allowed_headers: str, - exposed_headers: str, - max_age_in_seconds: int, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class CpkInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided - if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. Possible values include: "None", "AES256". - :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_key: Optional[str] = None, - encryption_key_sha256: Optional[str] = None, - encryption_algorithm: Optional[Union[str, "EncryptionAlgorithmType"]] = None, - **kwargs - ): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = encryption_key - self.encryption_key_sha256 = encryption_key_sha256 - self.encryption_algorithm = encryption_algorithm - - -class CpkScopeInfo(msrest.serialization.Model): - """Parameter group.
- - :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the - encryption scope to use to encrypt the data provided in the request. If not specified, - encryption is performed with the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_scope: Optional[str] = None, - **kwargs - ): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = encryption_scope - - -class DataLakeStorageError(msrest.serialization.Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: - ~azure.storage.blob.models.DataLakeStorageErrorAutoGenerated - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorAutoGenerated'}, - } - - def __init__( - self, - *, - data_lake_storage_error_details: Optional["DataLakeStorageErrorAutoGenerated"] = None, - **kwargs - ): - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = data_lake_storage_error_details - - -class DataLakeStorageErrorAutoGenerated(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - code: Optional[str] = None, - message: Optional[str] = None, - **kwargs - ): - super(DataLakeStorageErrorAutoGenerated, self).__init__(**kwargs) - self.code = code - self.message = message - - -class DelimitedTextConfiguration(msrest.serialization.Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator. - :type column_separator: str - :param field_quote: Required. field quote. - :type field_quote: str - :param record_separator: Required. record separator. - :type record_separator: str - :param escape_char: Required. escape char. - :type escape_char: str - :param headers_present: Required. has headers. 
- :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__( - self, - *, - column_separator: str, - field_quote: str, - record_separator: str, - escape_char: str, - headers_present: bool, - **kwargs - ): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = column_separator - self.field_quote = field_quote - self.record_separator = record_separator - self.escape_char = escape_char - self.headers_present = headers_present - - -class DirectoryHttpHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Cache control for given resource. - :type cache_control: str - :param content_type: Content type for given resource. - :type content_type: str - :param content_encoding: Content encoding for given resource. - :type content_encoding: str - :param content_language: Content language for given resource. - :type content_language: str - :param content_disposition: Content disposition for given resource. - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - cache_control: Optional[str] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_disposition: Optional[str] = None, - **kwargs - ): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - - -class FilterBlobItem(msrest.serialization.Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tags: A set of tags. Blob tags. 
- :type tags: ~azure.storage.blob.models.BlobTags - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'container_name': {'key': 'ContainerName', 'type': 'str'}, - 'tags': {'key': 'Tags', 'type': 'BlobTags'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - container_name: str, - tags: Optional["BlobTags"] = None, - **kwargs - ): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = name - self.container_name = container_name - self.tags = tags - - -class FilterBlobSegment(msrest.serialization.Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'where': {'key': 'Where', 'type': 'str'}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - where: str, - blobs: List["FilterBlobItem"], - next_marker: Optional[str] = None, - **kwargs - ): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.where = where - self.blobs = blobs - self.next_marker = next_marker - - -class GeoReplication(msrest.serialization.Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible values include: "live", - "bootstrap", "unavailable". - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes - preceding this value are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for reads. - :type last_sync_time: ~datetime.datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str'}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, - } - - def __init__( - self, - *, - status: Union[str, "GeoReplicationStatusType"], - last_sync_time: datetime.datetime, - **kwargs - ): - super(GeoReplication, self).__init__(**kwargs) - self.status = status - self.last_sync_time = last_sync_time - - -class JsonTextConfiguration(msrest.serialization.Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. record separator. 
- :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__( - self, - *, - record_separator: str, - **kwargs - ): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = record_separator - - -class KeyInfo(msrest.serialization.Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC time. - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - } - - def __init__( - self, - *, - start: str, - expiry: str, - **kwargs - ): - super(KeyInfo, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsFlatSegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobFlatListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. 
- - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobHierarchyListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - delimiter: Optional[str] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ListContainersSegmentResponse(msrest.serialization.Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. 
- :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_items: List["ContainerItem"], - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.container_items = container_items - self.next_marker = next_marker - - -class Logging(msrest.serialization.Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. Indicates whether all delete requests should be logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be logged. - :type write: bool - :param retention_policy: Required. the retention policy which determines how long the - associated data should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'delete': {'key': 'Delete', 'type': 'bool'}, - 'read': {'key': 'Read', 'type': 'bool'}, - 'write': {'key': 'Write', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - version: str, - delete: bool, - read: bool, - write: bool, - retention_policy: "RetentionPolicy", - **kwargs - ): - super(Logging, self).__init__(**kwargs) - self.version = version - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy - - -class Metrics(msrest.serialization.Model): - """a summary of request statistics grouped by API in hour or minute aggregates for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: the retention policy which determines how long the associated data - should persist. 
- :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - enabled: bool, - version: Optional[str] = None, - include_apis: Optional[bool] = None, - retention_policy: Optional["RetentionPolicy"] = None, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - 'if_tags': {'key': 'ifTags', 'type': 'str'}, - } - - def __init__( - self, - *, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - if_tags: Optional[str] = None, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - self.if_tags = if_tags - - -class PageList(msrest.serialization.Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, - } - - def __init__( - self, - *, - page_range: Optional[List["PageRange"]] = None, - clear_range: Optional[List["ClearRange"]] = None, - **kwargs - ): - super(PageList, self).__init__(**kwargs) - self.page_range = page_range - self.clear_range = clear_range - - -class PageRange(msrest.serialization.Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(PageRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class QueryFormat(msrest.serialization.Model): - """QueryFormat. - - :param type: The quick query format type. Possible values include: "delimited", "json", - "arrow". - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: delimited text configuration. - :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: json text configuration. - :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: arrow configuration. - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, - } - - def __init__( - self, - *, - type: Optional[Union[str, "QueryFormatType"]] = None, - delimited_text_configuration: Optional["DelimitedTextConfiguration"] = None, - json_text_configuration: Optional["JsonTextConfiguration"] = None, - arrow_configuration: Optional["ArrowConfiguration"] = None, - **kwargs - ): - super(QueryFormat, self).__init__(**kwargs) - self.type = type - self.delimited_text_configuration = delimited_text_configuration - self.json_text_configuration = json_text_configuration - self.arrow_configuration = arrow_configuration - - -class QueryRequest(msrest.serialization.Model): - """the quick query body. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL". - :vartype query_type: str - :param expression: Required. a query statement. 
- :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__( - self, - *, - expression: str, - input_serialization: Optional["QuerySerialization"] = None, - output_serialization: Optional["QuerySerialization"] = None, - **kwargs - ): - super(QueryRequest, self).__init__(**kwargs) - self.expression = expression - self.input_serialization = input_serialization - self.output_serialization = output_serialization - - -class QuerySerialization(msrest.serialization.Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. - :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat'}, - } - - def __init__( - self, - *, - format: "QueryFormat", - **kwargs - ): - super(QuerySerialization, self).__init__(**kwargs) - self.format = format - - -class RetentionPolicy(msrest.serialization.Model): - """the retention policy which determines how long the associated data should persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the storage - service. - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :type days: int - :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage - account. - :type allow_permanent_delete: bool - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, - } - - def __init__( - self, - *, - enabled: bool, - days: Optional[int] = None, - allow_permanent_delete: Optional[bool] = None, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - self.allow_permanent_delete = allow_permanent_delete - - -class SequenceNumberAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a - blob if it has a sequence number less than or equal to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it - has a sequence number less than the specified. 
- :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it - has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, - 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, - 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, - } - - def __init__( - self, - *, - if_sequence_number_less_than_or_equal_to: Optional[int] = None, - if_sequence_number_less_than: Optional[int] = None, - if_sequence_number_equal_to: Optional[int] = None, - **kwargs - ): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to - self.if_sequence_number_less_than = if_sequence_number_less_than - self.if_sequence_number_equal_to = if_sequence_number_equal_to - - -class SignedIdentifier(msrest.serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id. - :type id: str - :param access_policy: An Access policy. - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__( - self, - *, - id: str, - access_policy: Optional["AccessPolicy"] = None, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. 
- :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, - } - - def __init__( - self, - *, - source_if_modified_since: Optional[datetime.datetime] = None, - source_if_unmodified_since: Optional[datetime.datetime] = None, - source_if_match: Optional[str] = None, - source_if_none_match: Optional[str] = None, - source_if_tags: Optional[str] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_tags = source_if_tags - - -class StaticWebsite(msrest.serialization.Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a static website. - :type enabled: bool - :param index_document: The default name of the index page under each directory. - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page. - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index page. - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'index_document': {'key': 'IndexDocument', 'type': 'str'}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, - } - - def __init__( - self, - *, - enabled: bool, - index_document: Optional[str] = None, - error_document404_path: Optional[str] = None, - default_index_document_path: Optional[str] = None, - **kwargs - ): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = enabled - self.index_document = index_document - self.error_document404_path = error_document404_path - self.default_index_document_path = default_index_document_path - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - message: Optional[str] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage Service Properties. - - :param logging: Azure Analytics Logging settings. - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. 
- :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to the Blob service if - an incoming request's version is not specified. Possible values include version 2008-10-27 and - all more recent versions. - :type default_service_version: str - :param delete_retention_policy: the retention policy which determines how long the associated - data should persist. - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: The properties that enable an account to host a static website. - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging'}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, - } - - def __init__( - self, - *, - logging: Optional["Logging"] = None, - hour_metrics: Optional["Metrics"] = None, - minute_metrics: Optional["Metrics"] = None, - cors: Optional[List["CorsRule"]] = None, - default_service_version: Optional[str] = None, - delete_retention_policy: Optional["RetentionPolicy"] = None, - static_website: Optional["StaticWebsite"] = None, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = logging - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.default_service_version = default_service_version - self.delete_retention_policy = delete_retention_policy - self.static_website = static_website - - -class StorageServiceStats(msrest.serialization.Model): - """Stats for the storage service. - - :param geo_replication: Geo-Replication information for the Secondary Storage Service. - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, - } - - def __init__( - self, - *, - geo_replication: Optional["GeoReplication"] = None, - **kwargs - ): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = geo_replication - - -class UserDelegationKey(msrest.serialization.Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. - :type signed_tid: str - :param signed_start: Required. The date-time the key is active. - :type signed_start: ~datetime.datetime - :param signed_expiry: Required. The date-time the key expires. - :type signed_expiry: ~datetime.datetime - :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the - key. - :type signed_service: str - :param signed_version: Required. The service version that created the key. - :type signed_version: str - :param value: Required. The key as a base64 string. 
- :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, - 'signed_service': {'key': 'SignedService', 'type': 'str'}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - - def __init__( - self, - *, - signed_oid: str, - signed_tid: str, - signed_start: datetime.datetime, - signed_expiry: datetime.datetime, - signed_service: str, - signed_version: str, - value: str, - **kwargs - ): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = signed_oid - self.signed_tid = signed_tid - self.signed_start = signed_start - self.signed_expiry = signed_expiry - self.signed_service = signed_service - self.signed_version = signed_version - self.value = value diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/__init__.py deleted file mode 100644 index 62f85c9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_append_blob_operations.py deleted file mode 100644 index 0825fcf..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_append_blob_operations.py +++ /dev/null @@ -1,708 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class AppendBlobOperations(object): - """AppendBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - content_length, # type: int - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "AppendBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - 
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def append_block( - self, - content_length, # type: int - body, # type: IO - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Append Block operation commits a new block of data to the end of an existing append blob. - The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _max_size = None - _append_position = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "appendblock" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.append_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', 
response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def append_block_from_url( - self, - source_url, # type: str - content_length, # type: int - source_range=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Append Block operation commits a new block of data to the end of an existing append blob - where the contents are read from a source url. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _max_size = None - _append_position = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "appendblock" - accept = "application/xml" - - # Construct URL - url = self.append_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - 
if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def seal( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on - version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Parameter group. 
- :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _append_position = None - if append_position_access_conditions is not None: - _append_position = append_position_access_conditions.append_position - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - comp = "seal" - accept = "application/xml" - - # Construct URL - url = self.seal.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_blob_operations.py deleted file mode 100644 index a72d4dc..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_blob_operations.py +++ /dev/null @@ -1,3150 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BlobOperations(object): - """BlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def download( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - range_get_content_md5=None, # type: Optional[bool] - range_get_content_crc64=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Download operation reads or downloads a blob from the system, including its metadata and - properties. You can also call Download to read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param range_get_content_md5: When set to true and specified together with the Range, the - service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified together with the Range, the - service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 - MB in size. - :type range_get_content_crc64: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - 
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_properties( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) 
-> None - """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) - response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', 
response.headers.get('x-ms-copy-destination-snapshot')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) - response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', 
response.headers.get('x-ms-last-access-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def delete( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] - request_id_parameter=None, # type: Optional[str] - blob_delete_type="Permanent", # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - permanently removed from the storage account. If the storage account's soft delete feature is - enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for the number of days - specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob- - Service-Properties.md). After the specified number of days has passed, the blob's data is - permanently removed from the storage account. Note that you continue to be charged for the - soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify - the "include=deleted" query parameter to discover which blobs and snapshots have been soft - deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other - operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code - of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the - following two options: include: Delete the base blob and all of its snapshots. only: Delete - only the blob's snapshots and not the blob itself. - :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to - permanently delete a blob if blob soft delete is enabled. - :type blob_delete_type: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
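# A minimal sketch of the soft-delete workflow described in the docstring above, using
# the public azure-storage-blob v12 client rather than this generated operations class.
# The account URL, credential, container and blob names are placeholders, and blob soft
# delete is assumed to be enabled on the account.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient(
    account_url="https://<account>.blob.core.windows.net",
    credential="<sas-token-or-account-key>",
)
container = service.get_container_client("<container>")
blob = container.get_blob_client("<blob>")

# Delete the base blob and all of its snapshots (x-ms-delete-snapshots: include).
blob.delete_blob(delete_snapshots="include")

# Soft-deleted blobs remain discoverable via include=['deleted'] ...
for item in container.list_blobs(include=["deleted"]):
    print(item.name, item.deleted)

# ... and can be restored with Undelete Blob while the retention period lasts.
blob.undelete_blob()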
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if blob_delete_type is not None: - query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: 
- map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
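# An illustrative wire-level request for the setAccessControl action whose headers are
# assembled below; this is a sketch, not part of the generated client. The DFS endpoint,
# SAS token, object IDs and the x-ms-version value are placeholders/assumptions.
import requests

url = "https://<account>.dfs.core.windows.net/<filesystem>/<path>?<sas-token>"
resp = requests.patch(
    url,
    params={"action": "setAccessControl"},
    headers={
        "x-ms-version": "2021-08-06",
        "x-ms-owner": "<owner-object-id>",
        "x-ms-group": "<group-object-id>",
        "x-ms-permissions": "rwxr-x---",  # symbolic form; 4-digit octal such as 0750 also works
        "x-ms-acl": "user::rwx,group::r-x,other::---",
    },
)
resp.raise_for_status()  # the operation returns 200 OK on success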
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_access_control( - self, - timeout=None, # type: Optional[int] - upn=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
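# An illustrative raw request mirroring the getAccessControl path below; the owner,
# group, permissions and ACL come back in response headers, not in a body. The
# endpoint, SAS token and x-ms-version value are placeholders/assumptions.
import requests

url = "https://<account>.dfs.core.windows.net/<filesystem>/<path>?<sas-token>"
resp = requests.head(
    url,
    params={"action": "getAccessControl", "upn": "true"},  # upn=true returns UPNs instead of object IDs
    headers={"x-ms-version": "2021-08-06"},
)
resp.raise_for_status()
for name in ("x-ms-owner", "x-ms-group", "x-ms-permissions", "x-ms-acl"):
    print(name, "=", resp.headers.get(name))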
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def rename( - self, - rename_source, # type: str - timeout=None, # type: Optional[int] - path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Rename a blob/file. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. 
If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - 
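# A hedged sketch of the rename request that the code below constructs: the destination
# is the request URL and the source goes in the x-ms-rename-source header in
# "/{filesystem}/{path}" form. Endpoint, SAS token and version value are placeholders.
import requests

dest = "https://<account>.dfs.core.windows.net/<filesystem>/<new-path>?<sas-token>"
resp = requests.put(
    dest,
    headers={
        "x-ms-version": "2021-08-06",
        "x-ms-rename-source": "/<filesystem>/<old-path>",
        "If-None-Match": "*",  # fail if the destination already exists, per the docstring
    },
)
assert resp.status_code == 201  # a successful rename returns 201 Created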
if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, 
header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def undelete( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_expiry( - self, - expiry_options, # type: Union[str, "_models.BlobExpiryOptions"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - expires_on=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
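# An illustrative request for the Set Blob Expiry call built below: expiry_options maps
# to the x-ms-expiry-option header and expires_on to x-ms-expiry-time. The endpoint, SAS
# token, version value and the millisecond reading of the relative value are assumptions.
import requests

url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>"
resp = requests.put(
    url,
    params={"comp": "expiry"},
    headers={
        "x-ms-version": "2021-08-06",
        "x-ms-expiry-option": "RelativeToNow",  # one of the BlobExpiryOptions values
        "x-ms-expiry-time": "86400000",         # assumed: milliseconds from now for relative options
    },
)
resp.raise_for_status()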
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/xml" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_http_headers( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
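# A minimal sketch of the same Set HTTP Headers operation through the public
# azure-storage-blob client, where the BlobHTTPHeaders group is surfaced as
# ContentSettings. The connection string, container and blob names are placeholders.
from azure.storage.blob import BlobClient, ContentSettings

blob = BlobClient.from_connection_string("<connection-string>", "<container>", "<blob>")
blob.set_http_headers(content_settings=ContentSettings(
    content_type="application/json",
    cache_control="max-age=3600",
    content_disposition='attachment; filename="data.json"',
))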
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_md5 = None - _blob_content_encoding = None - _blob_content_language = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _blob_content_disposition = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_disposition = blob_http_headers.blob_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language 
is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or - more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - 
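# A hedged sketch of the Set Blob Metadata call whose headers are assembled below, using
# the public client instead of this generated method; on the wire each pair is sent as an
# x-ms-meta-<name> header. The connection string and names are placeholders, and the
# metadata names follow the C#-identifier rule mentioned in the docstring.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string("<connection-string>", "<container>", "<blob>")
blob.set_blob_metadata(metadata={"project": "demo", "owner": "data_team"})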
if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: 
Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 
'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
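The acquire/release pair above maps onto two PUTs with comp=lease and an x-ms-lease-action header, exactly the headers the generated code constructs. A rough sketch at the REST level, again assuming a hypothetical SAS-authorised blob URL:

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>"  # hypothetical
    common = {"x-ms-version": "2021-08-06"}

    # Acquire an infinite lease (x-ms-lease-duration: -1); a 201 response carries the lease id.
    acquired = requests.put(
        blob_url + "&comp=lease",
        headers={**common, "x-ms-lease-action": "acquire", "x-ms-lease-duration": "-1"},
    )
    lease_id = acquired.headers.get("x-ms-lease-id")

    # Release it again; the current lease id must be echoed back in x-ms-lease-id.
    requests.put(
        blob_url + "&comp=lease",
        headers={**common, "x-ms-lease-action": "release", "x-ms-lease-id": lease_id or ""},
    )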
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - proposed_lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
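renew and change follow the same comp=lease pattern, differing only in the x-ms-lease-action value and, for change, the extra x-ms-proposed-lease-id header. A hedged sketch of a lease-id change, with a hypothetical SAS URL and a placeholder for the currently held lease id:

    import uuid
    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>"  # hypothetical

    resp = requests.put(
        blob_url + "&comp=lease",
        headers={
            "x-ms-version": "2021-08-06",
            "x-ms-lease-action": "change",
            "x-ms-lease-id": "<current-lease-id>",        # the lease id returned by acquire
            "x-ms-proposed-lease-id": str(uuid.uuid4()),  # must be a valid GUID string
        },
    )
    new_lease_id = resp.headers.get("x-ms-lease-id")      # 200 on success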
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def create_snapshot( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if 
_lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def start_copy_from_url( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - seal_blob=None, # type: Optional[bool] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Start Copy From URL operation copies a blob or an internet resource to a new blob. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. 
- If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. Service version - 2019-12-12 and newer. - :type seal_blob: bool - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - accept = "application/xml" - - # Construct URL - url = self.start_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", 
self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def copy_from_url( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - blob_tags_string=None, # type: Optional[str] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not - return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - x_ms_requires_sync = "true" - accept = "application/xml" - - # Construct URL - url = self.copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 
'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - - if cls: - return cls(pipeline_response, None, response_headers) - 
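Both copy operations reduce to a PUT carrying an x-ms-copy-source header; the synchronous copy_from_url additionally sends x-ms-requires-sync: true, while start_copy_from_url returns an x-ms-copy-id that abort_copy_from_url can later cancel. A minimal sketch of the synchronous form, using hypothetical SAS-authorised source and destination URLs:

    import requests

    src_url = "https://myaccount.blob.core.windows.net/src/source-blob?<sas>"   # hypothetical
    dst_url = "https://myaccount.blob.core.windows.net/dst/target-blob?<sas>"   # hypothetical

    resp = requests.put(
        dst_url,
        headers={
            "x-ms-version": "2021-08-06",
            "x-ms-requires-sync": "true",   # synchronous Copy From URL
            "x-ms-copy-source": src_url,    # source must be public or SAS-authenticated
        },
    )
    # 202 Accepted; the copy id and status are reported in the response headers.
    print(resp.headers.get("x-ms-copy-id"), resp.headers.get("x-ms-copy-status"))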
- copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def abort_copy_from_url( - self, - copy_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a - destination blob with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_tier( - self, - tier, # type: Union[str, "_models.AccessTierRequired"] - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a - premium storage account and on a block blob in a blob storage account (locally redundant - storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not - update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tier" - accept = "application/xml" - - # Construct URL - url = self.set_tier.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if response.status_code == 202: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
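# ---------------------------------------------------------------------------
# [Editor's illustrative sketch - not part of the deleted generated source]
# The set_tier operation above is surfaced as BlobClient.set_standard_blob_tier
# for block blobs (set_premium_page_blob_tier covers premium page blobs). Names
# are placeholders; rehydrate_priority only matters when an archived blob is
# being moved back to an online tier.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string("<connection-string>", "container", "blob")
blob.set_standard_blob_tier("Archive")                         # send to archive
blob.set_standard_blob_tier("Hot", rehydrate_priority="High")  # rehydrate later
# ---------------------------------------------------------------------------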
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_account_info( - self, - **kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def query( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - query_request=None, # type: Optional["_models.QueryRequest"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Query operation enables users to select/project on blob data by providing simple query - expressions. 
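# ---------------------------------------------------------------------------
# [Editor's illustrative sketch - not part of the deleted generated source]
# Refers to the get_account_info operation above: the public equivalent,
# BlobClient.get_account_information, simply echoes the x-ms-sku-name and
# x-ms-account-kind response headers. Placeholder names throughout.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string("<connection-string>", "container", "blob")
info = blob.get_account_information()
print(info["sku_name"], info["account_kind"])
# ---------------------------------------------------------------------------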
- - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param query_request: the query request. - :type query_request: ~azure.storage.blob.models.QueryRequest - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "query" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.query.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - 
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', 
response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_tags( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.BlobTags" - """The Get Tags operation enables users to get the tags associated with a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
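# ---------------------------------------------------------------------------
# [Editor's illustrative sketch - not part of the deleted generated source]
# Refers to the query (quick query) operation above: the public surface is
# BlobClient.query_blob, which streams back only the rows selected by the
# expression. The expression, names and CSV layout below are placeholders.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string("<connection-string>", "container", "data.csv")
reader = blob.query_blob("SELECT * FROM BlobStorage WHERE _2 > 100")
filtered_rows = reader.readall()   # bytes containing only the matching records
# ---------------------------------------------------------------------------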
- :type request_id_parameter: str - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlobTags, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - accept = "application/xml" - - # Construct URL - url = self.get_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
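# ---------------------------------------------------------------------------
# [Editor's illustrative sketch - not part of the deleted generated source]
# get_tags above is exposed as BlobClient.get_blob_tags and returns the blob's
# tag set as a plain dict on newer service versions. Placeholder names.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string("<connection-string>", "container", "blob")
tags = blob.get_blob_tags()        # e.g. {"project": "alpha", "stage": "raw"}
# ---------------------------------------------------------------------------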
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlobTags', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_tags( - self, - timeout=None, # type: Optional[int] - version_id=None, # type: Optional[str] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - tags=None, # type: Optional["_models.BlobTags"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param tags: Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
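# ---------------------------------------------------------------------------
# [Editor's illustrative sketch - not part of the deleted generated source]
# set_tags above maps to BlobClient.set_blob_tags, which replaces the blob's
# entire tag set in one call. Placeholder names and tag values.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string("<connection-string>", "container", "blob")
blob.set_blob_tags({"project": "alpha", "stage": "raw"})
# ---------------------------------------------------------------------------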
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_block_blob_operations.py deleted file mode 100644 index f9804ce..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_block_blob_operations.py +++ /dev/null @@ -1,1098 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BlockBlobOperations(object): - """BlockBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def upload( - self, - content_length, # type: int - body, # type: IO - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Block Blob operation updates the content of an existing block blob. Updating an - existing block blob overwrites any existing metadata on the blob. 
Partial updates are not - supported with Put Blob; the content of the existing blob is overwritten with the content of - the new blob. To perform a partial update of the content of a block blob, use the Put Block - List operation. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "BlockBlob" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = 
self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def put_blob_from_url( - self, - content_length, # type: int - copy_source, # type: str - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - blob_tags_string=None, # type: Optional[str] - copy_source_blob_properties=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are - read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial - updates are not supported with Put Blob from URL; the content of an existing blob is - overwritten with the content of the new blob. To perform partial updates to a block blob’s - contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. - - :param content_length: The length of the request. - :type content_length: long - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. 
Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param copy_source_blob_properties: Optional, default is true. Indicates if properties from - the source blob should be copied. - :type copy_source_blob_properties: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - blob_type = "BlockBlob" - accept = "application/xml" - - # Construct URL - url = self.put_blob_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if copy_source_blob_properties is not None: - header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def stage_block( - self, - block_id, # type: str - content_length, # type: int - body, # type: IO - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Stage Block operation creates a new block to be committed as part of a blob. - - :param block_id: A valid Base64 string value that identifies the block. 
Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "block" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.stage_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def stage_block_from_url( - self, - block_id, # type: str - content_length, # type: int - source_url, # type: str - source_range=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - 
source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Stage Block operation creates a new block to be committed as part of a blob where the - contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "block" - accept = "application/xml" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", 
_encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def commit_block_list( - self, - blocks, # type: "_models.BlockLookupList" - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - 
modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Commit Block List operation writes a blob by specifying the list of block IDs that make up - the blob. In order to be written as part of a blob, a block must have been successfully written - to the server in a prior Put Block operation. You can call Put Block List to update a blob by - uploading only those blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from the committed block list - or from the uncommitted block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.commit_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = 
self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - 
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_block_list( - self, - snapshot=None, # type: Optional[str] - list_type="committed", # type: Union[str, "_models.BlockListType"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.BlockList" - """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a - block blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param list_type: Specifies whether to return the list of committed blocks, the list of - uncommitted blocks, or both lists together. - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlockList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - accept = "application/xml" - - # Construct URL - url = self.get_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = 
self._deserialize('BlockList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_container_operations.py deleted file mode 100644 index 3219753..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_container_operations.py +++ /dev/null @@ -1,1652 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ContainerOperations(object): - """ContainerOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] - request_id_parameter=None, # type: Optional[str] - container_cpk_scope_info=None, # type: Optional["_models.ContainerCpkScopeInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """creates a new container under the specified account. If the container with the same name - already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_cpk_scope_info: Parameter group. - :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _default_encryption_scope = None - _prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - _default_encryption_scope = container_cpk_scope_info.default_encryption_scope - _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') - if _prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, 
model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """returns all user-defined metadata and system properties for the specified container. The data - returned does not include the container's list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) - response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) - response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) - response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}'} # type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """operation marks the specified container for deletion. The container and any blobs contained - within it are later deleted during garbage collection. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: 
Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """operation sets one or more user-defined name-value pairs for the specified container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - restype = "container" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}'} # type: ignore - - def get_access_policy( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> List["_models.SignedIdentifier"] - """gets the permissions for the specified container. The permissions indicate whether container - data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - def set_access_policy( - self, - timeout=None, # type: Optional[int] - access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] - request_id_parameter=None, # type: 
Optional[str] - container_acl=None, # type: Optional[List["_models.SignedIdentifier"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """sets the permissions for the specified container. The permissions indicate whether blobs in a - container may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_acl: the acls for the container. - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 
'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - def restore( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - deleted_container_name=None, # type: Optional[str] - deleted_container_version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of - the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the - version of the deleted container to restore. 
- :type deleted_container_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{containerName}'} # type: ignore - - def rename( - self, - source_container_name, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Renames an existing container. - - :param source_container_name: Required. Specifies the name of the container to rename. - :type source_container_name: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{containerName}'} # type: ignore - - def submit_batch( - self, - content_length, # type: int - multipart_content_type, # type: str - body, # type: IO - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> IO - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/{containerName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if 
cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - 
if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - proposed_lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', 
response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def list_blob_flat_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsFlatSegmentResponse" - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsFlatSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore - - def list_blob_hierarchy_segment( - self, - delimiter, # type: str - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: 
Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsHierarchySegmentResponse" - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore - - def get_account_info( - self, - **kwargs # type: Any - ): 
- # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_directory_operations.py deleted file mode 100644 index f025757..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,748 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
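The ``get_account_info`` operation above is the wire-level call behind the public ``get_account_information`` helper. A minimal sketch, assuming the vendored v2021_08_06 blob package mirrors the azure-storage-blob surface; the account URL, container name and key are placeholders::

    from azure.multiapi.storagev2.blob.v2021_08_06 import ContainerClient

    container = ContainerClient(
        account_url="https://myaccount.blob.core.windows.net",  # placeholder endpoint
        container_name="mycontainer",
        credential="<account-key>",
    )
    # Issues the GET with restype=account&comp=properties shown above and
    # surfaces the x-ms-sku-name / x-ms-account-kind response headers.
    info = container.get_account_information()
    print(info["sku_name"], info["account_kind"])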
-# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Create a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. 
If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - resource = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", 
_cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def rename( - self, - rename_source, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - lease_access_conditions=None, # type: 
Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Rename a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. To fail if the destination already exists, use a conditional - request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - 
header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
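The rename operation shown here is the hierarchical-namespace rename that this release continues to expose through the filedatalake packages. A hedged sketch, assuming the v2021_08_06 filedatalake package and placeholder names; ``new_name`` uses the same ``"{filesystem}/{path}"`` format as the ``x-ms-rename-source`` header above::

    from azure.multiapi.storagev2.filedatalake.v2021_08_06 import DataLakeDirectoryClient

    directory = DataLakeDirectoryClient(
        account_url="https://myaccount.dfs.core.windows.net",  # placeholder endpoint
        file_system_name="myfilesystem",
        directory_name="olddir",
        credential="<account-key>",
    )
    # Renames olddir to newdir within the same file system.
    renamed = directory.rename_directory(new_name="myfilesystem/newdir")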
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def delete( - self, - recursive_directory_delete, # type: bool - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted. - If "false" and the directory is non-empty, an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
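The umask behaviour described in the ``create`` and ``rename`` docstrings above is ordinary POSIX bit masking: any bit set in the umask clears the matching permission bit. A small self-contained illustration of the default ``0027`` umask (values chosen for demonstration only)::

    requested = 0o777              # permissions requested for the new path
    umask = 0o027                  # default umask applied when none is supplied
    effective = requested & ~umask
    print(oct(effective))          # 0o750 -> rwxr-x--- for owner/group/other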
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_access_control( - self, - timeout=None, # type: Optional[int] - upn=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_page_blob_operations.py deleted file mode 100644 index ea4b17c..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_page_blob_operations.py +++ /dev/null @@ -1,1406 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class PageBlobOperations(object): - """PageBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - content_length, # type: int - blob_content_length, # type: int - timeout=None, # type: Optional[int] - tier=None, # type: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] - metadata=None, # type: Optional[str] - blob_sequence_number=0, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "PageBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = 
self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - 
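A minimal sketch of creating a page blob through the public blob client that fronts this generated operation, again assuming the v2021_08_06 package and placeholder names::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient

    blob = BlobClient(
        account_url="https://myaccount.blob.core.windows.net",  # placeholder endpoint
        container_name="mycontainer",
        blob_name="disk.vhd",
        credential="<account-key>",
    )
    # The maximum size must be aligned to a 512-byte boundary, as the docstring notes.
    blob.create_page_blob(size=4 * 1024 * 1024)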
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def upload_pages( - self, - content_length, # type: int - body, # type: IO - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Pages operation writes a range of pages to a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "update" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - 
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def clear_pages( - self, - content_length, # type: int - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "clear" - accept = "application/xml" - - # Construct URL - url = self.clear_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if 
_encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def upload_pages_from_url( - self, - source_url, # type: str - source_range, # type: str - content_length, # type: int - range, # type: str - source_content_md5=None, # type: 
Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Pages operation writes a range of pages to a page blob where the contents are read - from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The length of this range - should match the ContentLength header and x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be written. The range should - be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "page" - page_write = "update" - accept = "application/xml" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - 
request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_page_ranges( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.PageList" - """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot - of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - 
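# A minimal, hedged sketch of consuming the Get Page Ranges operation via the
# public client (azure-storage-blob v12 assumed; placeholders for the
# connection string and names). get_page_ranges returns a tuple of
# (valid_ranges, cleared_ranges), each a list of dicts with 'start' and 'end'
# byte offsets.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="pages", blob_name="disk.vhd")
valid, cleared = blob.get_page_ranges()
for page in valid:
    print("valid pages:", page["start"], "-", page["end"])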
raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_page_ranges_diff( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - prevsnapshot=None, # type: Optional[str] - prev_snapshot_url=None, # type: Optional[str] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.PageList" - """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that - were changed between target blob and previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating a Snapshot of - a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a - DateTime value that specifies that the response will contain only pages that were changed - between target blob and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is - the older of the two. Note that incremental snapshots are currently supported only for blobs - created on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in service versions - 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The - response will only contain pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def resize( - self, - blob_content_length, # type: int - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.resize.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
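# Hedged sketch of the Resize operation above through the public API
# (azure-storage-blob v12 assumed; placeholder names). The new size must be a
# multiple of 512 bytes; shrinking the blob clears any pages beyond the new
# length.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="pages", blob_name="disk.vhd")
blob.resize_blob(size=8 * 1024 * 1024)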
- header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def update_sequence_number( - self, - sequence_number_action, # type: Union[str, "_models.SequenceNumberActionType"] - timeout=None, # type: Optional[int] - blob_sequence_number=0, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the - request. This property applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. - :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.update_sequence_number.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def copy_incremental( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Copy Incremental operation copies a snapshot of the source page blob to a destination page - blob. The snapshot is copied such that only the differential changes between the previously - copied snapshot are transferred to the destination. The copied snapshots are complete copies of - the original snapshot and can be read or copied from as usual. This API is supported since REST - version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "incrementalcopy" - accept = "application/xml" - - # Construct URL - url = self.copy_incremental.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
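# Hedged sketch for the Copy Incremental operation above: through the public
# BlobClient this is exposed as start_copy_from_url(..., incremental_copy=True)
# (azure-storage-blob v12 assumed). As the docstring above notes, the source
# must be a page blob *snapshot* URL that is public or carries a SAS; the URL
# below is a placeholder.
from azure.storage.blob import BlobClient

dest = BlobClient.from_connection_string(
    "<connection-string>", container_name="backups", blob_name="disk-incremental")
copy = dest.start_copy_from_url(
    "https://<account>.blob.core.windows.net/pages/disk.vhd?snapshot=<ts>&<sas>",
    incremental_copy=True,
)
print(copy["copy_status"])  # typically 'pending' until the service completes the copy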
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_service_operations.py deleted file mode 100644 index 72f7a73..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_service_operations.py +++ /dev/null @@ -1,703 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def set_properties( - self, - storage_service_properties, # type: "_models.StorageServiceProperties" - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for a storage account's Blob service endpoint, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceProperties" - """gets the properties of a storage account's Blob service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - def get_statistics( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceStats" - """Retrieves statistics related to replication for the Blob service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('StorageServiceStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/'} # type: ignore - - def list_containers_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> "_models.ListContainersSegmentResponse" - """The List Containers Segment operation returns a list of the containers under the specified - account. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's metadata be returned as - part of the response body. - :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_containers_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_containers_segment.metadata = {'url': '/'} # type: ignore - - def get_user_delegation_key( - self, - key_info, # type: "_models.KeyInfo" - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.UserDelegationKey" - """Retrieves a user delegation key for the Blob service. This is only a valid operation when using - bearer token authentication. 
- - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey, or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "userdelegationkey" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('UserDelegationKey', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} # type: 
ignore - - def get_account_info( - self, - **kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/'} # type: ignore - - def submit_batch( - self, - content_length, # type: int - multipart_content_type, # type: str - body, # type: IO - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/'} # type: ignore - - def filter_blobs( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - where=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - **kwargs # type: 
Any - ): - # type: (...) -> "_models.FilterBlobSegment" - """The Filter Blobs operation enables callers to list blobs across all containers whose tags match - a given search expression. Filter blobs searches across all containers within a storage - account but can be scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. 
- :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_lease.py b/azure/multiapi/storagev2/blob/v2020_06_12/_lease.py deleted file mode 100644 index d495d6e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_lease.py +++ /dev/null @@ -1,331 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._serialize import get_modify_conditions - -if TYPE_CHECKING: - from datetime import datetime - - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(object): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.BlobClient or - ~azure.storage.blob.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'blob_name'): - self._client = client._client.blob # type: ignore # pylint: disable=protected-access - elif hasattr(client, 'container_name'): - self._client = client._client.container # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use either BlobClient or ContainerClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_06_12/_list_blobs_helper.py deleted file mode 100644 index 309d37b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_list_blobs_helper.py +++ /dev/null @@ -1,236 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.paging import PageIterator, ItemPaged -from azure.core.exceptions import HttpResponseError -from ._deserialize import get_blob_properties_from_generated_code, parse_tags -from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix, FilterBlobItem -from ._models import BlobProperties, FilteredBlob -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - def _extract_data_cb(self, get_next_return): - continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class BlobPrefix(ItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str next_marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. 
- :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class FilteredBlobPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.FilteredBlob) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - tags = parse_tags(item.tags) - blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_models.py b/azure/multiapi/storagev2/blob/v2020_06_12/_models.py deleted file mode 100644 index 00a53dc..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_models.py +++ /dev/null @@ -1,1111 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._generated.models import ArrowField - -from ._shared import decode_base64_to_text -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Logging as GeneratedLogging -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import StaticWebsite as GeneratedStaticWebsite -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy - - -class BlobType(str, Enum): - - BlockBlob = "BlockBlob" - PageBlob = "PageBlob" - AppendBlob = "AppendBlob" - - -class BlockState(str, Enum): - """Block blob block types.""" - - Committed = 'Committed' #: Committed blocks. - Latest = 'Latest' #: Latest blocks. - Uncommitted = 'Uncommitted' #: Uncommitted blocks. - - -class StandardBlobTier(str, Enum): - """ - Specifies the blob tier to set the blob to. This is only applicable for - block blobs on standard storage accounts. - """ - - Archive = 'Archive' #: Archive - Cool = 'Cool' #: Cool - Hot = 'Hot' #: Hot - - -class PremiumPageBlobTier(str, Enum): - """ - Specifies the page blob tier to set the blob to. 
This is only applicable to page - blobs on premium storage accounts. Please take a look at: - https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughput per PageBlobTier. - """ - - P4 = 'P4' #: P4 Tier - P6 = 'P6' #: P6 Tier - P10 = 'P10' #: P10 Tier - P20 = 'P20' #: P20 Tier - P30 = 'P30' #: P30 Tier - P40 = 'P40' #: P40 Tier - P50 = 'P50' #: P50 Tier - P60 = 'P60' #: P60 Tier - - -class SequenceNumberAction(str, Enum): - """Sequence number actions.""" - - Increment = 'increment' - """ - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - """ - - Max = 'max' - """ - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - """ - - Update = 'update' - """Sets the sequence number to the value included with the request.""" - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the container may be accessed publicly and the level of access. - """ - - OFF = 'off' - """ - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - """ - - Blob = 'blob' - """ - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - """ - - Container = 'container' - """ - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - """ - - -class BlobAnalyticsLogging(GeneratedLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Blob service. 
- The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GeneratedStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. - """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. 
- - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ContainerProperties(DictMixin): - """Blob container's properties class. - - Returned ``ContainerProperties`` instances expose these values through a - dictionary interface, for example: ``container_props["last_modified"]``. - Additionally, the container name is available as ``container_props["name"]``. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the container. - :ivar str public_access: Specifies whether data in the container may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the container has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the container has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - container as metadata. - :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope: - The default encryption scope configuration for the container. - :ivar bool deleted: - Whether this container was deleted. - :ivar str version: - The version of a deleted container. 
- """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.lease = LeaseProperties(**kwargs) - self.public_access = kwargs.get('x-ms-blob-public-access') - self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') - self.deleted = None - self.version = None - self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') - self.metadata = kwargs.get('metadata') - self.encryption_scope = None - default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') - if default_encryption_scope: - self.encryption_scope = ContainerEncryptionScope( - default_encryption_scope=default_encryption_scope, - prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) - ) - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = generated.properties.public_access - props.has_immutability_policy = generated.properties.has_immutability_policy - props.deleted = generated.deleted - props.version = generated.version - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access - return props - - -class ContainerPropertiesPaged(PageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class BlobProperties(DictMixin): - """ - Blob Properties. - - :ivar str name: - The name of the blob. - :ivar str container: - The container in which the blob resides. - :ivar str snapshot: - Datetime value that uniquely identifies the blob snapshot. - :ivar ~azure.blob.storage.BlobType blob_type: - String indicating this blob's type. - :ivar dict metadata: - Name-value pairs associated with the blob as metadata. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - The size of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. - :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar bool is_append_blob_sealed: - Indicate if the append blob is sealed or not. - - .. versionadded:: 12.4.0 - - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar ~azure.storage.blob.StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. 
- :ivar str rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :ivar ~datetime.datetime blob_tier_change_time: - Indicates when the access tier was last changed. - :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar bool deleted: - Whether this blob was deleted. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - :ivar ~datetime.datetime creation_time: - Indicates when the blob was created, in UTC. - :ivar str archive_status: - Archive status of blob. - :ivar str encryption_key_sha256: - The SHA-256 hash of the provided encryption key. - :ivar str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :ivar bool request_server_encrypted: - Whether this blob is encrypted. - :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: - Only present for blobs that have policy ids and rule ids applied to them. - - .. versionadded:: 12.4.0 - - :ivar str object_replication_destination_policy: - Represents the Object Replication Policy Id that created this blob. - - .. versionadded:: 12.4.0 - - :ivar ~datetime.datetime last_accessed_on: - Indicates when the last Read/Write operation was performed on a Blob. - - .. versionadded:: 12.6.0 - - :ivar int tag_count: - Tags count on this blob. - - .. versionadded:: 12.4.0 - - :ivar dict(str, str) tags: - Key value pair of tags on this blob. - - .. 
versionadded:: 12.4.0 - - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.container = None - self.snapshot = kwargs.get('x-ms-snapshot') - self.version_id = kwargs.get('x-ms-version-id') - self.is_current_version = kwargs.get('x-ms-is-current-version') - self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None - self.metadata = kwargs.get('metadata') - self.encrypted_metadata = kwargs.get('encrypted_metadata') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') - self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') - self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.blob_tier = kwargs.get('x-ms-access-tier') - self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') - self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') - self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') - self.deleted = False - self.deleted_time = None - self.remaining_retention_days = None - self.creation_time = kwargs.get('x-ms-creation-time') - self.archive_status = kwargs.get('x-ms-archive-status') - self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') - self.encryption_scope = kwargs.get('x-ms-encryption-scope') - self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') - self.object_replication_source_properties = kwargs.get('object_replication_source_properties') - self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') - self.last_accessed_on = kwargs.get('x-ms-last-access-time') - self.tag_count = kwargs.get('x-ms-tag-count') - self.tags = None - - -class FilteredBlob(DictMixin): - """Blob info from a Filter Blobs API call. - - :ivar name: Blob name - :type name: str - :ivar container_name: Container name. - :type container_name: str - :ivar tags: Key value pairs of blob tags. - :type tags: Dict[str, str] - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name', None) - self.container_name = kwargs.get('container_name', None) - self.tags = kwargs.get('tags', None) - - -class LeaseProperties(DictMixin): - """Blob Lease Properties. - - :ivar str status: - The lease status of the blob. Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """The content settings of a blob. - - :param str content_type: - The content type specified for the blob. 
If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :param str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :param bytearray content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class CopyProperties(DictMixin): - """Blob Copy Properties. - - These properties will be `None` if this blob has never been the destination - in a Copy Blob operation, or if this blob has been modified after a concluded - Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar ~datetime.datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. 
- :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar ~datetime.datetime destination_snapshot: - Included if the blob is incremental copy blob or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this blob. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class BlobBlock(DictMixin): - """BlockBlob Block class. - - :param str block_id: - Block id. - :param str state: - Block state. Possible values: committed|uncommitted - :ivar int size: - Block size in bytes. - """ - - def __init__(self, block_id, state=BlockState.Latest): - self.id = block_id - self.state = state - self.size = None - - @classmethod - def _from_generated(cls, generated): - block = cls(decode_base64_to_text(generated.name)) - block.size = generated.size - return block - - -class PageRange(DictMixin): - """Page Range for page blob. - - :param int start: - Start of page range in bytes. - :param int end: - End of page range in bytes. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. 
- - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class ContainerSasPermissions(object): - """ContainerSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_container_sas` function and - for the AccessPolicies used with - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - List blobs in the container. - :param bool tag: - Set or get tags on the blobs in the container. 
- """ - def __init__(self, read=False, write=False, delete=False, list=False, delete_previous_version=False, tag=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self.delete_previous_version = delete_previous_version - self.tag = tag - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('t' if self.tag else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ContainerSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, delete, - and list permissions. - :return: A ContainerSasPermissions object - :rtype: ~azure.storage.blob.ContainerSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, - delete_previous_version=p_delete_previous_version, tag=p_tag) - - return parsed - - -class BlobSasPermissions(object): - """BlobSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_blob_sas` function. - - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool tag: - Set or get tags on the blob. - """ - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, delete_previous_version=False, tag=True): - self.read = read - self.add = add - self.create = create - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.tag = tag - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('t' if self.tag else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a BlobSasPermissions from a string. - - To specify read, add, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A BlobSasPermissions object - :rtype: ~azure.storage.blob.BlobSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - - parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, - delete_previous_version=p_delete_previous_version, tag=p_tag) - - return parsed - - -class CustomerProvidedEncryptionKey(object): - """ - All data in Azure Storage is encrypted at-rest using an account-level encryption key. - In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents - and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. - - When you use a customer-provided key, Azure Storage does not manage or persist your key. - When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. - A SHA-256 hash of the encryption key is written alongside the blob contents, - and is used to verify that all subsequent operations against the blob use the same encryption key. - This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. - When reading a blob, the provided key is used to decrypt your data after reading it from disk. - In both cases, the provided encryption key is securely discarded - as soon as the encryption or decryption process completes. - - :param str key_value: - Base64-encoded AES-256 encryption key value. - :param str key_hash: - Base64-encoded SHA256 of the encryption key. - :ivar str algorithm: - Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - """ - def __init__(self, key_value, key_hash): - self.key_value = key_value - self.key_hash = key_hash - self.algorithm = 'AES256' - - -class ContainerEncryptionScope(object): - """The default encryption scope configuration for a container. - - This scope is used implicitly for all future writes within the container, - but can be overridden per blob operation. - - .. versionadded:: 12.2.0 - - :param str default_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - :param bool prevent_encryption_scope_override: - If true, prevents any request from specifying a different encryption scope than the scope - set on the container. Default value is false. - """ - - def __init__(self, default_encryption_scope, **kwargs): - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) - - @classmethod - def _from_generated(cls, generated): - if generated.properties.default_encryption_scope: - scope = cls( - generated.properties.default_encryption_scope, - prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False - ) - return scope - return None - - -class DelimitedJsonDialect(object): - """Defines the input or output JSON serialization for a blob data query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', '\n') - - -class DelimitedTextDialect(object): - """Defines the input or output delimited (CSV) serialization for a blob query request. - - :keyword str delimiter: - Column separator, defaults to ','. 
- :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', ',') - self.quotechar = kwargs.pop('quotechar', '"') - self.lineterminator = kwargs.pop('lineterminator', '\n') - self.escapechar = kwargs.pop('escapechar', "") - self.has_header = kwargs.pop('has_header', False) - - -class ArrowDialect(ArrowField): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param ~azure.storage.blob.ArrowType type: Arrow field type. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. - """ - def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin - super(ArrowDialect, self).__init__(type=type, **kwargs) - - -class ArrowType(str, Enum): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class ObjectReplicationPolicy(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str policy_id: - Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. - :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: - Within each policy there may be multiple replication rules. - e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 - """ - - def __init__(self, **kwargs): - self.policy_id = kwargs.pop('policy_id', None) - self.rules = kwargs.pop('rules', None) - - -class ObjectReplicationRule(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str rule_id: - Rule id. - :ivar str status: - The status of the rule. It could be "Complete" or "Failed" - """ - - def __init__(self, **kwargs): - self.rule_id = kwargs.pop('rule_id', None) - self.status = kwargs.pop('status', None) - - -class BlobQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2020_06_12/_quick_query_helper.py deleted file mode 100644 index eb51d98..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_quick_query_helper.py +++ /dev/null @@ -1,196 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from io import BytesIO -from typing import Union, Iterable, IO # pylint: disable=unused-import - -from ._shared.avro.datafile import DataFileReader -from ._shared.avro.avro_io import DatumReader - - -class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. - """ - - def __init__( - self, - name=None, - container=None, - errors=None, - record_delimiter='\n', - encoding=None, - headers=None, - response=None, - error_cls=None, - ): - self.name = name - self.container = container - self.response_headers = headers - self.record_delimiter = record_delimiter - self._size = 0 - self._bytes_processed = 0 - self._errors = errors - self._encoding = encoding - self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) - self._first_result = self._process_record(next(self._parsed_results)) - self._error_cls = error_cls - - def __len__(self): - return self._size - - def _process_record(self, result): - self._size = result.get('totalBytes', self._size) - self._bytes_processed = result.get('bytesScanned', self._bytes_processed) - if 'data' in result: - return result.get('data') - if 'fatal' in result: - error = self._error_cls( - error=result['name'], - is_fatal=result['fatal'], - description=result['description'], - position=result['position'] - ) - if self._errors: - self._errors(error) - return None - - def _iter_stream(self): - if self._first_result is not None: - yield self._first_result - for next_result in self._parsed_results: - processed_result = self._process_record(next_result) - if processed_result is not None: - yield processed_result - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - for record in self._iter_stream(): - stream.write(record) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - delimiter = self.record_delimiter.encode('utf-8') - for record_chunk in self._iter_stream(): - for record in record_chunk.split(delimiter): - if self._encoding: - yield record.decode(self._encoding) - else: - yield record - - - -class QuickQueryStreamer(object): - """ - File-like streaming iterator. 
- """ - - def __init__(self, generator): - self.generator = generator - self.iterator = iter(generator) - self._buf = b"" - self._point = 0 - self._download_offset = 0 - self._buf_start = 0 - self.file_length = None - - def __len__(self): - return self.file_length - - def __iter__(self): - return self.iterator - - @staticmethod - def seekable(): - return True - - def __next__(self): - next_part = next(self.iterator) - self._download_offset += len(next_part) - return next_part - - next = __next__ # Python 2 compatibility. - - def tell(self): - return self._point - - def seek(self, offset, whence=0): - if whence == 0: - self._point = offset - elif whence == 1: - self._point += offset - else: - raise ValueError("whence must be 0, or 1") - if self._point < 0: - self._point = 0 # XXX is this right? - - def read(self, size): - try: - # keep reading from the generator until the buffer of this stream has enough data to read - while self._point + size > self._download_offset: - self._buf += self.__next__() - except StopIteration: - self.file_length = self._download_offset - - start_point = self._point - - # EOF - self._point = min(self._point + size, self._download_offset) - - relative_start = start_point - self._buf_start - if relative_start < 0: - raise ValueError("Buffer has dumped too much data") - relative_end = relative_start + size - data = self._buf[relative_start: relative_end] - - # dump the extra data in buffer - # buffer start--------------------16bytes----current read position - dumped_size = max(relative_end - 16 - relative_start, 0) - self._buf_start += dumped_size - self._buf = self._buf[dumped_size:] - - return data diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_serialize.py b/azure/multiapi/storagev2/blob/v2020_06_12/_serialize.py deleted file mode 100644 index 57f748a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_serialize.py +++ /dev/null @@ -1,198 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -try: - from urllib.parse import quote -except ImportError: - from urllib2 import quote # type: ignore - -from azure.core import MatchConditions - -from ._models import ( - ContainerEncryptionScope, - DelimitedJsonDialect) -from ._generated.models import ( - ModifiedAccessConditions, - SourceModifiedAccessConditions, - CpkScopeInfo, - ContainerCpkScopeInfo, - QueryFormat, - QuerySerialization, - DelimitedTextConfiguration, - JsonTextConfiguration, - ArrowConfiguration, - QueryFormatType, - BlobTag, - BlobTags, LeaseAccessConditions -) - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08', - '2020-06-12' -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if kwargs.get(etag_param): - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_modify_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None), - if_tags=kwargs.pop('if_tags_match_condition', None) - ) - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), - source_if_tags=kwargs.pop('source_if_tags_match_condition', None) - ) - - -def get_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> CpkScopeInfo - if 'encryption_scope' in kwargs: - return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) - return None - - -def get_container_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> ContainerCpkScopeInfo 
- encryption_scope = kwargs.pop('container_encryption_scope', None) - if encryption_scope: - if isinstance(encryption_scope, ContainerEncryptionScope): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope.default_encryption_scope, - prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override - ) - if isinstance(encryption_scope, dict): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope['default_encryption_scope'], - prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') - ) - raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") - return None - - -def get_api_version(kwargs, default): - # type: (Dict[str, Any]) -> str - api_version = kwargs.pop('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or default - - -def serialize_blob_tags_header(tags=None): - # type: (Optional[Dict[str, str]]) -> str - if tags is None: - return None - - components = list() - if tags: - for key, value in tags.items(): - components.append(quote(key, safe='.-')) - components.append('=') - components.append(quote(value, safe='.-')) - components.append('&') - - if components: - del components[-1] - - return ''.join(components) - - -def serialize_blob_tags(tags=None): - # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] - tag_list = list() - if tags: - tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] - return BlobTags(blob_tag_set=tag_list) - - -def serialize_query_format(formater): - if isinstance(formater, DelimitedJsonDialect): - serialization_settings = JsonTextConfiguration( - record_separator=formater.delimiter - ) - qq_format = QueryFormat( - type=QueryFormatType.json, - json_text_configuration=serialization_settings) - elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well - try: - headers = formater.has_header - except AttributeError: - headers = False - serialization_settings = DelimitedTextConfiguration( - column_separator=formater.delimiter, - field_quote=formater.quotechar, - record_separator=formater.lineterminator, - escape_char=formater.escapechar, - headers_present=headers - ) - qq_format = QueryFormat( - type=QueryFormatType.delimited, - delimited_text_configuration=serialization_settings - ) - elif isinstance(formater, list): - serialization_settings = ArrowConfiguration( - schema=formater - ) - qq_format = QueryFormat( - type=QueryFormatType.arrow, - arrow_configuration=serialization_settings) - elif not formater: - return None - else: - raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect.") - return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/__init__.py deleted file mode 100644 index 5b396cd..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# --------------------------------------------------------------------------
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io.py
deleted file mode 100644
index 93a5c13..0000000
--- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io.py
+++ /dev/null
@@ -1,464 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-"""Input/output utilities.
-
-Includes:
- - i/o-specific constants
- - i/o-specific exceptions
- - schema validation
- - leaf value encoding and decoding
- - datum reader/writer stuff (?)
-
-Also includes a generic representation for data, which uses the
-following mapping:
- - Schema records are implemented as dict.
- - Schema arrays are implemented as list.
- - Schema maps are implemented as dict.
- - Schema strings are implemented as unicode.
- - Schema bytes are implemented as str.
- - Schema ints are implemented as int.
- - Schema longs are implemented as long.
- - Schema floats are implemented as float.
- - Schema doubles are implemented as float.
- - Schema booleans are implemented as bool.
-"""
-
-import json
-import logging
-import struct
-import sys
-
-from ..avro import schema
-
-PY3 = sys.version_info[0] == 3
-
-logger = logging.getLogger(__name__)
-
-# ------------------------------------------------------------------------------
-# Constants
-
-STRUCT_FLOAT = struct.Struct('<f')           # little-endian float
-STRUCT_DOUBLE = struct.Struct('<d')          # little-endian double
-
-# ------------------------------------------------------------------------------
-# Exceptions
-
-
-class SchemaResolutionException(schema.AvroException):
-    def __init__(self, fail_msg, writer_schema=None):
-        pretty_writers = json.dumps(json.loads(str(writer_schema)), indent=2)
-        if writer_schema:
-            fail_msg += "\nWriter's Schema: %s" % pretty_writers
-        schema.AvroException.__init__(self, fail_msg)
-
-# ------------------------------------------------------------------------------
-# Decoder
-
-
-class BinaryDecoder(object):
-    """Read leaf values."""
-
-    def __init__(self, reader):
-        """
-        reader is a Python object on which we can call read, seek, and tell.
-        """
-        self._reader = reader
-
-    @property
-    def reader(self):
-        """Reports the reader used by this decoder."""
-        return self._reader
-
-    def read(self, n):
-        """Read n bytes.
-
-        Args:
-          n: Number of bytes to read.
-        Returns:
-          The next n bytes from the input.
-        """
-        assert (n >= 0), n
-        input_bytes = self.reader.read(n)
-        if n > 0 and not input_bytes:
-            raise StopIteration
-        assert (len(input_bytes) == n), input_bytes
-        return input_bytes
-
-    @staticmethod
-    def read_null():
-        """
-        null is written as zero bytes
-        """
-        return None
-
-    def read_boolean(self):
-        """
-        a boolean is written as a single byte
-        whose value is either 0 (false) or 1 (true).
-        """
-        b = ord(self.read(1))
-        if b == 1:
-            return True
-        if b == 0:
-            return False
-        fail_msg = "Invalid value for boolean: %s" % b
-        raise schema.AvroException(fail_msg)
-
-    def read_int(self):
-        """
-        int and long values are written using variable-length, zig-zag coding.
-        """
-        return self.read_long()
-
-    def read_long(self):
-        """
-        int and long values are written using variable-length, zig-zag coding.
-        """
-        b = ord(self.read(1))
-        n = b & 0x7F
-        shift = 7
-        while (b & 0x80) != 0:
-            b = ord(self.read(1))
-            n |= (b & 0x7F) << shift
-            shift += 7
-        datum = (n >> 1) ^ -(n & 1)
-        return datum
-
-    def read_float(self):
-        """
-        A float is written as 4 bytes.
-        The float is converted into a 32-bit integer using a method equivalent to
-        Java's floatToIntBits and then encoded in little-endian format.
-        """
-        return STRUCT_FLOAT.unpack(self.read(4))[0]
-
-    def read_double(self):
-        """
-        A double is written as 8 bytes.
-        The double is converted into a 64-bit integer using a method equivalent to
-        Java's doubleToLongBits and then encoded in little-endian format.
-        """
-        return STRUCT_DOUBLE.unpack(self.read(8))[0]
-
-    def read_bytes(self):
-        """
-        Bytes are encoded as a long followed by that many bytes of data.
-        """
-        nbytes = self.read_long()
-        assert (nbytes >= 0), nbytes
-        return self.read(nbytes)
-
-    def read_utf8(self):
-        """
-        A string is encoded as a long followed by
-        that many bytes of UTF-8 encoded character data.
- """ - input_bytes = self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - def skip_boolean(self): - self.skip(1) - - def skip_int(self): - self.skip_long() - - def skip_long(self): - b = ord(self.read(1)) - while (b & 0x80) != 0: - b = ord(self.read(1)) - - def skip_float(self): - self.skip(4) - - def skip_double(self): - self.skip(8) - - def skip_bytes(self): - self.skip(self.read_long()) - - def skip_utf8(self): - self.skip_bytes() - - def skip(self, n): - self.reader.seek(self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class DatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema". - """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - def read(self, decoder): - return self.read_data(self.writer_schema, decoder) - - def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = decoder.read_boolean() - elif writer_schema.type == 'string': - result = decoder.read_utf8() - elif writer_schema.type == 'int': - result = decoder.read_int() - elif writer_schema.type == 'long': - result = decoder.read_long() - elif writer_schema.type == 'float': - result = decoder.read_float() - elif writer_schema.type == 'double': - result = decoder.read_double() - elif writer_schema.type == 'bytes': - result = decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = decoder.skip_boolean() - elif writer_schema.type == 'string': - result = decoder.skip_utf8() - elif writer_schema.type == 'int': - result = decoder.skip_int() - elif writer_schema.type == 'long': - result = decoder.skip_long() - elif writer_schema.type == 'float': - result = decoder.skip_float() - elif writer_schema.type == 'double': - result = decoder.skip_double() - elif writer_schema.type == 'bytes': - result = decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - 
result = self.skip_enum(decoder) - elif writer_schema.type == 'array': - self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. - """ - return decoder.read(writer_schema.size) - - @staticmethod - def skip_fixed(writer_schema, decoder): - return decoder.skip(writer_schema.size) - - @staticmethod - def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - def skip_enum(decoder): - return decoder.skip_int() - - def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - read_items.append(self.read_data(writer_schema.items, decoder)) - block_count = decoder.read_long() - return read_items - - def skip_array(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - self.skip_data(writer_schema.items, decoder) - block_count = decoder.read_long() - - def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. 
- """ - read_items = {} - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - key = decoder.read_utf8() - read_items[key] = self.read_data(writer_schema.values, decoder) - block_count = decoder.read_long() - return read_items - - def skip_map(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - decoder.skip_utf8() - self.skip_data(writer_schema.values, decoder) - block_count = decoder.read_long() - - def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. - """ - # schema resolution - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return self.read_data(selected_writer_schema, decoder) - - def skip_union(self, writer_schema, decoder): - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. 
- """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io_async.py deleted file mode 100644 index e981216..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io_async.py +++ /dev/null @@ -1,448 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import logging -import sys - -from ..avro import schema - -from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Decoder - - -class AsyncBinaryDecoder(object): - """Read leaf values.""" - - def __init__(self, reader): - """ - reader is a Python object on which we can call read, seek, and tell. - """ - self._reader = reader - - @property - def reader(self): - """Reports the reader used by this decoder.""" - return self._reader - - async def read(self, n): - """Read n bytes. - - Args: - n: Number of bytes to read. - Returns: - The next n bytes from the input. - """ - assert (n >= 0), n - input_bytes = await self.reader.read(n) - if n > 0 and not input_bytes: - raise StopAsyncIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - async def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(await self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - async def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return await self.read_long() - - async def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. 
- """ - b = ord(await self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(await self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - async def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(await self.read(4))[0] - - async def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(await self.read(8))[0] - - async def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = await self.read_long() - assert (nbytes >= 0), nbytes - return await self.read(nbytes) - - async def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. - """ - input_bytes = await self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - async def skip_boolean(self): - await self.skip(1) - - async def skip_int(self): - await self.skip_long() - - async def skip_long(self): - b = ord(await self.read(1)) - while (b & 0x80) != 0: - b = ord(await self.read(1)) - - async def skip_float(self): - await self.skip(4) - - async def skip_double(self): - await self.skip(8) - - async def skip_bytes(self): - await self.skip(await self.read_long()) - - async def skip_utf8(self): - await self.skip_bytes() - - async def skip(self, n): - await self.reader.seek(await self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class AsyncDatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema", and the schema expected by the - reader the "reader's schema". 
- """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - async def read(self, decoder): - return await self.read_data(self.writer_schema, decoder) - - async def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = await decoder.read_boolean() - elif writer_schema.type == 'string': - result = await decoder.read_utf8() - elif writer_schema.type == 'int': - result = await decoder.read_int() - elif writer_schema.type == 'long': - result = await decoder.read_long() - elif writer_schema.type == 'float': - result = await decoder.read_float() - elif writer_schema.type == 'double': - result = await decoder.read_double() - elif writer_schema.type == 'bytes': - result = await decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = await self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = await self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = await self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = await self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = await self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - async def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = await decoder.skip_boolean() - elif writer_schema.type == 'string': - result = await decoder.skip_utf8() - elif writer_schema.type == 'int': - result = await decoder.skip_int() - elif writer_schema.type == 'long': - result = await decoder.skip_long() - elif writer_schema.type == 'float': - result = await decoder.skip_float() - elif writer_schema.type == 'double': - result = await decoder.skip_double() - elif writer_schema.type == 'bytes': - result = await decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = await self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.skip_enum(decoder) - elif writer_schema.type == 'array': - await self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - await self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = await self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - await self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - async def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. 
- """ - return await decoder.read(writer_schema.size) - - @staticmethod - async def skip_fixed(writer_schema, decoder): - return await decoder.skip(writer_schema.size) - - @staticmethod - async def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = await decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - async def skip_enum(decoder): - return await decoder.skip_int() - - async def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - read_items.append(await self.read_data(writer_schema.items, decoder)) - block_count = await decoder.read_long() - return read_items - - async def skip_array(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await self.skip_data(writer_schema.items, decoder) - block_count = await decoder.read_long() - - async def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = {} - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - key = await decoder.read_utf8() - read_items[key] = await self.read_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - return read_items - - async def skip_map(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await decoder.skip_utf8() - await self.skip_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - - async def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. 
- """ - # schema resolution - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return await self.read_data(selected_writer_schema, decoder) - - async def skip_union(self, writer_schema, decoder): - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - async def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. - """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = await self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - async def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - await self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile.py deleted file mode 100644 index df06fe0..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile.py +++ /dev/null @@ -1,266 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import io -import logging -import sys -import zlib - -from ..avro import avro_io -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Version of the container file: -VERSION = 1 - -if PY3: - MAGIC = b'Obj' + bytes([VERSION]) - MAGIC_SIZE = len(MAGIC) -else: - MAGIC = 'Obj' + chr(VERSION) - MAGIC_SIZE = len(MAGIC) - -# Size of the synchronization marker, in number of bytes: -SYNC_SIZE = 16 - -# Schema of the container header: -META_SCHEMA = schema.parse(""" -{ - "type": "record", "name": "org.apache.avro.file.Header", - "fields": [{ - "name": "magic", - "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} - }, { - "name": "meta", - "type": {"type": "map", "values": "bytes"} - }, { - "name": "sync", - "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} - }] -} -""" % { - 'magic_size': MAGIC_SIZE, - 'sync_size': SYNC_SIZE, -}) - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null', 'deflate']) - -# Metadata key associated to the schema: -SCHEMA_KEY = "avro.schema" - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class DataFileException(schema.AvroException): - """Problem reading or writing file object containers.""" - -# ------------------------------------------------------------------------------ - - -class DataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io.BinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - self._reader.seek(0, 0) - - # read the header: magic, meta, sync - self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' % self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - self._cur_object_index = 0 - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. 
- if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - - def __enter__(self): - return self - - def __exit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __iter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - header_reader.seek(0, 0) - - # read header into a dict - header = self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - def _read_block_header(self): - self._block_count = self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - elif self.codec == 'deflate': - # Compressed data is stored as (length, data), which - # corresponds to how the "bytes" type is encoded. - data = self.raw_decoder.read_bytes() - # -15 is the log of the window size; negative indicates - # "raw" (no zlib headers) decompression. See zlib.h. - uncompressed = zlib.decompress(data, -15) - self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopIteration - if proposed_sync_marker != self.sync_marker: - self.reader.seek(-SYNC_SIZE, 1) - - def __next__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. 
- if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - self._cur_object_index = 0 - - self._read_block_header() - - datum = self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - self.reader.track_object_position() - self.reader.set_object_index(0) - else: - self.reader.set_object_index(self._cur_object_index) - - return datum - - # PY2 - def next(self): - return self.__next__() - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile_async.py deleted file mode 100644 index 1e9d018..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile_async.py +++ /dev/null @@ -1,215 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import logging -import sys - -from ..avro import avro_io_async -from ..avro import schema -from .datafile import DataFileException -from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY - - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null']) - - -class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else \ - avro_io_async.AsyncBinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - self.codec = "null" - self._block_count = 0 - self._cur_object_index = 0 - self._meta = None - self._sync_marker = None - - async def init(self): - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - await self._reader.seek(0, 0) - - # read the header: magic, meta, sync - await self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' 
% self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. - if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - return self - - async def __aenter__(self): - return self - - async def __aexit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __aiter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - async def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - await header_reader.seek(0, 0) - - # read header into a dict - header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - async def _read_block_header(self): - self._block_count = await self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - await self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - async def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = await self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopAsyncIteration - if proposed_sync_marker != self.sync_marker: - await self.reader.seek(-SYNC_SIZE, 1) - - async def __anext__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - await self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. 
- if hasattr(self._reader, 'object_position'): - await self.reader.track_object_position() - self._cur_object_index = 0 - - await self._read_block_header() - - datum = await self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - await self.reader.track_object_position() - await self.reader.set_object_index(0) - else: - await self.reader.set_object_index(self._cur_object_index) - - return datum - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/schema.py deleted file mode 100644 index ffe2853..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/schema.py +++ /dev/null @@ -1,1221 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines - -"""Representation of Avro schemas. - -A schema may be one of: - - A record, mapping field names to field value data; - - An error, equivalent to a record; - - An enum, containing one of a small set of symbols; - - An array of values, all of the same schema; - - A map containing string/value pairs, each of a declared schema; - - A union of other schemas; - - A fixed sized binary object; - - A unicode string; - - A sequence of bytes; - - A 32-bit signed int; - - A 64-bit signed long; - - A 32-bit floating-point float; - - A 64-bit floating-point double; - - A boolean; - - Null. -""" - -import abc -import json -import logging -import re -import sys -from six import with_metaclass - -PY2 = sys.version_info[0] == 2 - -if PY2: - _str = unicode # pylint: disable=undefined-variable -else: - _str = str - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Log level more verbose than DEBUG=10, INFO=20, etc. 
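The removed datafile.py above frames an Avro object container as the 4-byte magic, a metadata map, a 16-byte sync marker, and then data blocks; blocks using the 'deflate' codec are stored without zlib headers, which is why _read_block_header decompresses with a negative window size. A short stand-alone illustration of those two details (the payload is made up):

    import zlib

    # The container magic is b'Obj' followed by the format version byte.
    MAGIC = b'Obj' + bytes([1])
    assert MAGIC == b'Obj\x01'

    # 'deflate' blocks are raw deflate streams, so compression and
    # decompression both use wbits=-15 (no zlib header), matching
    # zlib.decompress(data, -15) in _read_block_header above.
    compressor = zlib.compressobj(9, zlib.DEFLATED, -15)
    raw = compressor.compress(b'example avro block') + compressor.flush()
    assert zlib.decompress(raw, -15) == b'example avro block'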
-DEBUG_VERBOSE = 5 - -NULL = 'null' -BOOLEAN = 'boolean' -STRING = 'string' -BYTES = 'bytes' -INT = 'int' -LONG = 'long' -FLOAT = 'float' -DOUBLE = 'double' -FIXED = 'fixed' -ENUM = 'enum' -RECORD = 'record' -ERROR = 'error' -ARRAY = 'array' -MAP = 'map' -UNION = 'union' - -# Request and error unions are part of Avro protocols: -REQUEST = 'request' -ERROR_UNION = 'error_union' - -PRIMITIVE_TYPES = frozenset([ - NULL, - BOOLEAN, - STRING, - BYTES, - INT, - LONG, - FLOAT, - DOUBLE, -]) - -NAMED_TYPES = frozenset([ - FIXED, - ENUM, - RECORD, - ERROR, -]) - -VALID_TYPES = frozenset.union( - PRIMITIVE_TYPES, - NAMED_TYPES, - [ - ARRAY, - MAP, - UNION, - REQUEST, - ERROR_UNION, - ], -) - -SCHEMA_RESERVED_PROPS = frozenset([ - 'type', - 'name', - 'namespace', - 'fields', # Record - 'items', # Array - 'size', # Fixed - 'symbols', # Enum - 'values', # Map - 'doc', -]) - -FIELD_RESERVED_PROPS = frozenset([ - 'default', - 'name', - 'doc', - 'order', - 'type', -]) - -VALID_FIELD_SORT_ORDERS = frozenset([ - 'ascending', - 'descending', - 'ignore', -]) - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class Error(Exception): - """Base class for errors in this module.""" - - -class AvroException(Error): - """Generic Avro schema error.""" - - -class SchemaParseException(AvroException): - """Error while parsing a JSON schema descriptor.""" - - -class Schema(with_metaclass(abc.ABCMeta, object)): - """Abstract base class for all Schema classes.""" - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object. - - Args: - data_type: Type of the schema to initialize. - other_props: Optional dictionary of additional properties. - """ - if data_type not in VALID_TYPES: - raise SchemaParseException('%r is not a valid Avro type.' % data_type) - - # All properties of this schema, as a map: property name -> property value - self._props = {} - - self._props['type'] = data_type - self._type = data_type - - if other_props: - self._props.update(other_props) - - @property - def namespace(self): - """Returns: the namespace this schema belongs to, if any, or None.""" - return self._props.get('namespace', None) - - @property - def type(self): - """Returns: the type of this schema.""" - return self._type - - @property - def doc(self): - """Returns: the documentation associated to this schema, if any, or None.""" - return self._props.get('doc', None) - - @property - def props(self): - """Reports all the properties of this schema. - - Includes all properties, reserved and non reserved. - JSON properties of this schema are directly generated from this dict. - - Returns: - A dictionary of properties associated to this schema. - """ - return self._props - - @property - def other_props(self): - """Returns: the dictionary of non-reserved properties.""" - return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) - - def __str__(self): - """Returns: the JSON representation of this schema.""" - return json.dumps(self.to_json(names=None)) - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - - -# ------------------------------------------------------------------------------ - - -_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') - -_RE_FULL_NAME = re.compile( - r'^' - r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace - r'([A-Za-z_][A-Za-z0-9_]*)' # name - r'$' -) - - -class Name(object): - """Representation of an Avro name.""" - - def __init__(self, name, namespace=None): - """Parses an Avro name. - - Args: - name: Avro name to parse (relative or absolute). - namespace: Optional explicit namespace if the name is relative. - """ - # Normalize: namespace is always defined as a string, possibly empty. - if namespace is None: - namespace = '' - - if '.' in name: - # name is absolute, namespace is ignored: - self._fullname = name - - match = _RE_FULL_NAME.match(self._fullname) - if match is None: - raise SchemaParseException( - 'Invalid absolute schema name: %r.' % self._fullname) - - self._name = match.group(1) - self._namespace = self._fullname[:-(len(self._name) + 1)] - - else: - # name is relative, combine with explicit namespace: - self._name = name - self._namespace = namespace - self._fullname = (self._name - if (not self._namespace) else - '%s.%s' % (self._namespace, self._name)) - - # Validate the fullname: - if _RE_FULL_NAME.match(self._fullname) is None: - raise SchemaParseException( - 'Invalid schema name %r infered from name %r and namespace %r.' - % (self._fullname, self._name, self._namespace)) - - def __eq__(self, other): - if not isinstance(other, Name): - return NotImplemented - return self.fullname == other.fullname - - @property - def simple_name(self): - """Returns: the simple name part of this name.""" - return self._name - - @property - def namespace(self): - """Returns: this name's namespace, possible the empty string.""" - return self._namespace - - @property - def fullname(self): - """Returns: the full name.""" - return self._fullname - - -# ------------------------------------------------------------------------------ - - -class Names(object): - """Tracks Avro named schemas and default namespace during parsing.""" - - def __init__(self, default_namespace=None, names=None): - """Initializes a new name tracker. - - Args: - default_namespace: Optional default namespace. - names: Optional initial mapping of known named schemas. - """ - if names is None: - names = {} - self._names = names - self._default_namespace = default_namespace - - @property - def names(self): - """Returns: the mapping of known named schemas.""" - return self._names - - @property - def default_namespace(self): - """Returns: the default namespace, if any, or None.""" - return self._default_namespace - - def new_with_default_namespace(self, namespace): - """Creates a new name tracker from this tracker, but with a new default ns. - - Args: - namespace: New default namespace to use. - Returns: - New name tracker with the specified default namespace. - """ - return Names(names=self._names, default_namespace=namespace) - - def get_name(self, name, namespace=None): - """Resolves the Avro name according to this name tracker's state. - - Args: - name: Name to resolve (absolute or relative). - namespace: Optional explicit namespace. - Returns: - The specified name, resolved according to this tracker. - """ - if namespace is None: - namespace = self._default_namespace - return Name(name=name, namespace=namespace) - - def get_schema(self, name, namespace=None): - """Resolves an Avro schema by name. 
- - Args: - name: Name (relative or absolute) of the Avro schema to look up. - namespace: Optional explicit namespace. - Returns: - The schema with the specified name, if any, or None. - """ - avro_name = self.get_name(name=name, namespace=namespace) - return self._names.get(avro_name.fullname, None) - - def prune_namespace(self, properties): - """given a properties, return properties with namespace removed if - it matches the own default namespace - """ - if self.default_namespace is None: - # I have no default -- no change - return properties - if 'namespace' not in properties: - # he has no namespace - no change - return properties - if properties['namespace'] != self.default_namespace: - # we're different - leave his stuff alone - return properties - # we each have a namespace and it's redundant. delete his. - prunable = properties.copy() - del prunable['namespace'] - return prunable - - def register(self, schema): - """Registers a new named schema in this tracker. - - Args: - schema: Named Avro schema to register in this tracker. - """ - if schema.fullname in VALID_TYPES: - raise SchemaParseException( - '%s is a reserved type name.' % schema.fullname) - if schema.fullname in self.names: - raise SchemaParseException( - 'Avro name %r already exists.' % schema.fullname) - - logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname) - self._names[schema.fullname] = schema - - -# ------------------------------------------------------------------------------ - - -class NamedSchema(Schema): - """Abstract base class for named schemas. - - Named schemas are enumerated in NAMED_TYPES. - """ - - def __init__( - self, - data_type, - name=None, - namespace=None, - names=None, - other_props=None, - ): - """Initializes a new named schema object. - - Args: - data_type: Type of the named schema. - name: Name (absolute or relative) of the schema. - namespace: Optional explicit namespace if name is relative. - names: Tracker to resolve and register Avro names. - other_props: Optional map of additional properties of the schema. - """ - assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type) - self._avro_name = names.get_name(name=name, namespace=namespace) - - super(NamedSchema, self).__init__(data_type, other_props) - - names.register(self) - - self._props['name'] = self.name - if self.namespace: - self._props['namespace'] = self.namespace - - @property - def avro_name(self): - """Returns: the Name object describing this schema's name.""" - return self._avro_name - - @property - def name(self): - return self._avro_name.simple_name - - @property - def namespace(self): - return self._avro_name.namespace - - @property - def fullname(self): - return self._avro_name.fullname - - def name_ref(self, names): - """Reports this schema name relative to the specified name tracker. - - Args: - names: Avro name tracker to relativise this schema name against. - Returns: - This schema name, relativised against the specified name tracker. - """ - if self.namespace == names.default_namespace: - return self.name - return self.fullname - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - -# ------------------------------------------------------------------------------ - - -_NO_DEFAULT = object() - - -class Field(object): - """Representation of the schema of a field in a record.""" - - def __init__( - self, - data_type, - name, - index, - has_default, - default=_NO_DEFAULT, - order=None, - doc=None, - other_props=None - ): - """Initializes a new Field object. - - Args: - data_type: Avro schema of the field. - name: Name of the field. - index: 0-based position of the field. - has_default: - default: - order: - doc: - other_props: - """ - if (not isinstance(name, _str)) or (not name): - raise SchemaParseException('Invalid record field name: %r.' % name) - if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): - raise SchemaParseException('Invalid record field order: %r.' % order) - - # All properties of this record field: - self._props = {} - - self._has_default = has_default - if other_props: - self._props.update(other_props) - - self._index = index - self._type = self._props['type'] = data_type - self._name = self._props['name'] = name - - if has_default: - self._props['default'] = default - - if order is not None: - self._props['order'] = order - - if doc is not None: - self._props['doc'] = doc - - @property - def type(self): - """Returns: the schema of this field.""" - return self._type - - @property - def name(self): - """Returns: this field name.""" - return self._name - - @property - def index(self): - """Returns: the 0-based index of this field in the record.""" - return self._index - - @property - def default(self): - return self._props['default'] - - @property - def has_default(self): - return self._has_default - - @property - def order(self): - return self._props.get('order', None) - - @property - def doc(self): - return self._props.get('doc', None) - - @property - def props(self): - return self._props - - @property - def other_props(self): - return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) - - def __str__(self): - return json.dumps(self.to_json()) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['type'] = self.type.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Primitive Types - - -class PrimitiveSchema(Schema): - """Schema of a primitive Avro type. - - Valid primitive types are defined in PRIMITIVE_TYPES. - """ - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object for the specified primitive type. - - Args: - data_type: Type of the schema to construct. Must be primitive. - """ - if data_type not in PRIMITIVE_TYPES: - raise AvroException('%r is not a valid primitive type.' % data_type) - super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) - - @property - def name(self): - """Returns: the simple name of this schema.""" - # The name of a primitive type is the type itself. - return self.type - - @property - def fullname(self): - """Returns: the fully qualified name of this schema.""" - # The full name is the simple name for primitive schema. 
- return self.name - - def to_json(self, names=None): - if len(self.props) == 1: - return self.fullname - return self.props - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (non-recursive) - - -class FixedSchema(NamedSchema): - def __init__( - self, - name, - namespace, - size, - names=None, - other_props=None, - ): - # Ensure valid ctor args - if not isinstance(size, int): - fail_msg = 'Fixed Schema requires a valid integer for size property.' - raise AvroException(fail_msg) - - super(FixedSchema, self).__init__( - data_type=FIXED, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - self._props['size'] = size - - @property - def size(self): - """Returns: the size of this fixed schema, in bytes.""" - return self._props['size'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ - - -class EnumSchema(NamedSchema): - def __init__( - self, - name, - namespace, - symbols, - names=None, - doc=None, - other_props=None, - ): - """Initializes a new enumeration schema object. - - Args: - name: Simple name of this enumeration. - namespace: Optional namespace. - symbols: Ordered list of symbols defined in this enumeration. - names: - doc: - other_props: - """ - symbols = tuple(symbols) - symbol_set = frozenset(symbols) - if (len(symbol_set) != len(symbols) - or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): - raise AvroException( - 'Invalid symbols for enum schema: %r.' % (symbols,)) - - super(EnumSchema, self).__init__( - data_type=ENUM, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - - self._props['symbols'] = symbols - if doc is not None: - self._props['doc'] = doc - - @property - def symbols(self): - """Returns: the symbols defined in this enum.""" - return self._props['symbols'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (recursive) - - -class ArraySchema(Schema): - """Schema of an array.""" - - def __init__(self, items, other_props=None): - """Initializes a new array schema object. - - Args: - items: Avro schema of the array items. 
- other_props: - """ - super(ArraySchema, self).__init__( - data_type=ARRAY, - other_props=other_props, - ) - self._items_schema = items - self._props['items'] = items - - @property - def items(self): - """Returns: the schema of the items in this array.""" - return self._items_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - item_schema = self.items - to_dump['items'] = item_schema.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class MapSchema(Schema): - """Schema of a map.""" - - def __init__(self, values, other_props=None): - """Initializes a new map schema object. - - Args: - values: Avro schema of the map values. - other_props: - """ - super(MapSchema, self).__init__( - data_type=MAP, - other_props=other_props, - ) - self._values_schema = values - self._props['values'] = values - - @property - def values(self): - """Returns: the schema of the values in this map.""" - return self._values_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['values'] = self.values.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class UnionSchema(Schema): - """Schema of a union.""" - - def __init__(self, schemas): - """Initializes a new union schema object. - - Args: - schemas: Ordered collection of schema branches in the union. - """ - super(UnionSchema, self).__init__(data_type=UNION) - self._schemas = tuple(schemas) - - # Validate the schema branches: - - # All named schema names are unique: - named_branches = tuple( - filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) - unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) - if len(unique_names) != len(named_branches): - raise AvroException( - 'Invalid union branches with duplicate schema name:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - # Types are unique within unnamed schemas, and union is not allowed: - unnamed_branches = tuple( - filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) - unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) - if UNION in unique_types: - raise AvroException( - 'Invalid union branches contain other unions:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - if len(unique_types) != len(unnamed_branches): - raise AvroException( - 'Invalid union branches with duplicate type:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - @property - def schemas(self): - """Returns: the ordered list of schema branches in the union.""" - return self._schemas - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - to_dump.append(schema.to_json(names)) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class ErrorUnionSchema(UnionSchema): - """Schema representing the declared errors of a protocol message.""" - - def __init__(self, schemas): - """Initializes an error-union 
schema. - - Args: - schema: collection of error schema. - """ - # Prepend "string" to handle system errors - schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas) - super(ErrorUnionSchema, self).__init__(schemas=schemas) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - # Don't print the system error schema - if schema.type == STRING: - continue - to_dump.append(schema.to_json(names)) - return to_dump - - -# ------------------------------------------------------------------------------ - - -class RecordSchema(NamedSchema): - """Schema of a record.""" - - @staticmethod - def _make_field(index, field_desc, names): - """Builds field schemas from a list of field JSON descriptors. - - Args: - index: 0-based index of the field in the record. - field_desc: JSON descriptors of a record field. - Return: - The field schema. - """ - field_schema = schema_from_json_data( - json_data=field_desc['type'], - names=names, - ) - other_props = ( - dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS))) - return Field( - data_type=field_schema, - name=field_desc['name'], - index=index, - has_default=('default' in field_desc), - default=field_desc.get('default', _NO_DEFAULT), - order=field_desc.get('order', None), - doc=field_desc.get('doc', None), - other_props=other_props, - ) - - @staticmethod - def make_field_list(field_desc_list, names): - """Builds field schemas from a list of field JSON descriptors. - - Guarantees field name unicity. - - Args: - field_desc_list: collection of field JSON descriptors. - names: Avro schema tracker. - Yields - Field schemas. - """ - for index, field_desc in enumerate(field_desc_list): - yield RecordSchema._make_field(index, field_desc, names) - - @staticmethod - def _make_field_map(fields): - """Builds the field map. - - Guarantees field name unicity. - - Args: - fields: iterable of field schema. - Returns: - A map of field schemas, indexed by name. - """ - field_map = {} - for field in fields: - if field.name in field_map: - raise SchemaParseException( - 'Duplicate record field name %r.' % field.name) - field_map[field.name] = field - return field_map - - def __init__( - self, - name, - namespace, - fields=None, - make_fields=None, - names=None, - record_type=RECORD, - doc=None, - other_props=None - ): - """Initializes a new record schema object. - - Args: - name: Name of the record (absolute or relative). - namespace: Optional namespace the record belongs to, if name is relative. - fields: collection of fields to add to this record. - Exactly one of fields or make_fields must be specified. - make_fields: function creating the fields that belong to the record. - The function signature is: make_fields(names) -> ordered field list. - Exactly one of fields or make_fields must be specified. - names: - record_type: Type of the record: one of RECORD, ERROR or REQUEST. - Protocol requests are not named. - doc: - other_props: - """ - if record_type == REQUEST: - # Protocol requests are not named: - super(RecordSchema, self).__init__( - data_type=REQUEST, - other_props=other_props, - ) - elif record_type in [RECORD, ERROR]: - # Register this record name in the tracker: - super(RecordSchema, self).__init__( - data_type=record_type, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - else: - raise SchemaParseException( - 'Invalid record type: %r.' 
% record_type) - - if record_type in [RECORD, ERROR]: - avro_name = names.get_name(name=name, namespace=namespace) - nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) - elif record_type == REQUEST: - # Protocol request has no name: no need to change default namespace: - nested_names = names - - if fields is None: - fields = make_fields(names=nested_names) - else: - assert make_fields is None - self._fields = tuple(fields) - - self._field_map = RecordSchema._make_field_map(self._fields) - - self._props['fields'] = fields - if doc is not None: - self._props['doc'] = doc - - @property - def fields(self): - """Returns: the field schemas, as an ordered tuple.""" - return self._fields - - @property - def field_map(self): - """Returns: a read-only map of the field schemas index by field names.""" - return self._field_map - - def to_json(self, names=None): - if names is None: - names = Names() - # Request records don't have names - if self.type == REQUEST: - return [f.to_json(names) for f in self.fields] - - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - - to_dump = names.prune_namespace(self.props.copy()) - to_dump['fields'] = [f.to_json(names) for f in self.fields] - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Module functions - - -def filter_keys_out(items, keys): - """Filters a collection of (key, value) items. - - Exclude any item whose key belongs to keys. - - Args: - items: Dictionary of items to filter the keys out of. - keys: Keys to filter out. - Yields: - Filtered items. - """ - for key, value in items.items(): - if key in keys: - continue - yield key, value - - -# ------------------------------------------------------------------------------ - - -def _schema_from_json_string(json_string, names): - if json_string in PRIMITIVE_TYPES: - return PrimitiveSchema(data_type=json_string) - - # Look for a known named schema: - schema = names.get_schema(name=json_string) - if schema is None: - raise SchemaParseException( - 'Unknown named schema %r, known names: %r.' 
- % (json_string, sorted(names.names))) - return schema - - -def _schema_from_json_array(json_array, names): - def MakeSchema(desc): - return schema_from_json_data(json_data=desc, names=names) - - return UnionSchema(map(MakeSchema, json_array)) - - -def _schema_from_json_object(json_object, names): - data_type = json_object.get('type') - if data_type is None: - raise SchemaParseException( - 'Avro schema JSON descriptor has no "type" property: %r' % json_object) - - other_props = dict( - filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) - - if data_type in PRIMITIVE_TYPES: - # FIXME should not ignore other properties - result = PrimitiveSchema(data_type, other_props=other_props) - - elif data_type in NAMED_TYPES: - name = json_object.get('name') - namespace = json_object.get('namespace', names.default_namespace) - if data_type == FIXED: - size = json_object.get('size') - result = FixedSchema(name, namespace, size, names, other_props) - elif data_type == ENUM: - symbols = json_object.get('symbols') - doc = json_object.get('doc') - result = EnumSchema(name, namespace, symbols, names, doc, other_props) - - elif data_type in [RECORD, ERROR]: - field_desc_list = json_object.get('fields', ()) - - def MakeFields(names): - return tuple(RecordSchema.make_field_list(field_desc_list, names)) - - result = RecordSchema( - name=name, - namespace=namespace, - make_fields=MakeFields, - names=names, - record_type=data_type, - doc=json_object.get('doc'), - other_props=other_props, - ) - else: - raise Exception('Internal error: unknown type %r.' % data_type) - - elif data_type in VALID_TYPES: - # Unnamed, non-primitive Avro type: - - if data_type == ARRAY: - items_desc = json_object.get('items') - if items_desc is None: - raise SchemaParseException( - 'Invalid array schema descriptor with no "items" : %r.' - % json_object) - result = ArraySchema( - items=schema_from_json_data(items_desc, names), - other_props=other_props, - ) - - elif data_type == MAP: - values_desc = json_object.get('values') - if values_desc is None: - raise SchemaParseException( - 'Invalid map schema descriptor with no "values" : %r.' - % json_object) - result = MapSchema( - values=schema_from_json_data(values_desc, names=names), - other_props=other_props, - ) - - elif data_type == ERROR_UNION: - error_desc_list = json_object.get('declared_errors') - assert error_desc_list is not None - error_schemas = map( - lambda desc: schema_from_json_data(desc, names=names), - error_desc_list) - result = ErrorUnionSchema(schemas=error_schemas) - - else: - raise Exception('Internal error: unknown type %r.' % data_type) - else: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r' % json_object) - return result - - -# Parsers for the JSON data types: -_JSONDataParserTypeMap = { - _str: _schema_from_json_string, - list: _schema_from_json_array, - dict: _schema_from_json_object, -} - - -def schema_from_json_data(json_data, names=None): - """Builds an Avro Schema from its JSON descriptor. - - Args: - json_data: JSON data representing the descriptor of the Avro schema. - names: Optional tracker for Avro named schemas. - Returns: - The Avro schema parsed from the JSON descriptor. - Raises: - SchemaParseException: if the descriptor is invalid. - """ - if names is None: - names = Names() - - # Select the appropriate parser based on the JSON data type: - parser = _JSONDataParserTypeMap.get(type(json_data)) - if parser is None: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) - return parser(json_data, names=names) - - -# ------------------------------------------------------------------------------ - - -def parse(json_string): - """Constructs a Schema from its JSON descriptor in text form. - - Args: - json_string: String representation of the JSON descriptor of the schema. - Returns: - The parsed schema. - Raises: - SchemaParseException: on JSON parsing error, - or if the JSON descriptor is invalid. - """ - try: - json_data = json.loads(json_string) - except Exception as exn: - raise SchemaParseException( - 'Error parsing schema from JSON: %r. ' - 'Error message: %r.' - % (json_string, exn)) - - # Initialize the names object - names = Names() - - # construct the Avro Schema object - return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client.py deleted file mode 100644 index a2efa21..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client.py +++ /dev/null @@ -1,460 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client_async.py deleted file mode 100644 index 3e619c9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client_async.py +++ /dev/null @@ -1,192 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. 
- It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/constants.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/constants.py deleted file mode 100644 index bdee829..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -from .._generated import AzureBlobStorage - - -X_MS_VERSION = AzureBlobStorage(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. 
-if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds - # the 80000 seconds was calculated with: - # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 80000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/encryption.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. 
- :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
-    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
-                                                           encryption_data.wrapped_content_key.algorithm)
-    _validate_not_none('content_encryption_key', content_encryption_key)
-
-    return content_encryption_key
-
-
-def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
-    '''
-    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
-    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
-    Returns the original plaintext.
-
-    :param str message:
-        The ciphertext to be decrypted.
-    :param _EncryptionData encryption_data:
-        The metadata associated with this ciphertext.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        unwrap_key(key, algorithm)
-            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
-        get_kid()
-            - returns a string key id for this key-encryption-key.
-    :param function resolver(kid):
-        The user-provided key resolver. Uses the kid string to return a key-encryption-key
-        implementing the interface defined above.
-    :return: The decrypted plaintext.
-    :rtype: str
-    '''
-    _validate_not_none('message', message)
-    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
-
-    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
-        raise ValueError('Specified encryption algorithm is not supported.')
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
-
-    # decrypt data
-    decrypted_data = message
-    decryptor = cipher.decryptor()
-    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
-
-    # unpad data
-    unpadder = PKCS7(128).unpadder()
-    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
-
-    return decrypted_data
-
-
-def encrypt_blob(blob, key_encryption_key):
-    '''
-    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
-    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
-    Returns a json-formatted string containing the encryption metadata. This method should
-    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
-    is done as a part of the upload_data_chunks method.
-
-    :param bytes blob:
-        The blob to be encrypted.
-    :param object key_encryption_key:
-        The user-provided key-encryption-key. Must implement the following methods:
-        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
-        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
-        get_kid()--returns a string key id for this key-encryption-key.
-    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
-    :rtype: (str, bytes)
-    '''
-
-    _validate_not_none('blob', blob)
-    _validate_not_none('key_encryption_key', key_encryption_key)
-    _validate_key_encryption_key_wrap(key_encryption_key)
-
-    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
-    content_encryption_key = urandom(32)
-    initialization_vector = urandom(16)
-
-    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
-
-    # PKCS7 with 16 byte blocks ensures compatibility with AES.
-    padder = PKCS7(128).padder()
-    padded_data = padder.update(blob) + padder.finalize()
-
-    # Encrypt the data.
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/models.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/models.py deleted file mode 100644 index c51356b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/models.py +++ /dev/null @@ -1,466 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
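For reference, the queue helpers above exchange a JSON envelope of roughly the following shape (``EncryptedMessageContents`` plus the same ``EncryptionData`` structure parsed by ``_dict_to_encryption_data``). Every value here is a placeholder, and the protocol/algorithm literals are assumptions rather than constants taken from the removed module::

    from json import dumps, loads

    envelope = {
        'EncryptedMessageContents': '<base64 ciphertext>',
        'EncryptionData': {
            'WrappedContentKey': {
                'KeyId': 'local-kek-1',              # placeholder key id
                'EncryptedKey': '<base64 wrapped cek>',
                'Algorithm': 'A256KW',               # assumed wrap algorithm label
            },
            'EncryptionAgent': {
                'Protocol': '1.0',                   # assumed protocol literal
                'EncryptionAlgorithm': 'AES_CBC_256',
            },
            'ContentEncryptionIV': '<base64 16-byte IV>',
            'KeyWrappingMetadata': {'EncryptionLibrary': '<library/version>'},
        },
    }

    payload = dumps(envelope)   # stored as the queue message text
    parsed = loads(payload)
    assert 'EncryptionData' in parsed and 'EncryptedMessageContents' in parsed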
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.blob.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - - return parsed - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. 
- - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/parser.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies.py deleted file mode 100644 index c9bc798..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
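Typical usage of the ``ResourceTypes``, ``AccountSasPermissions`` and ``Services`` helpers defined above is a straightforward round trip between flag objects and their shorthand strings, for example::

    resource_types = ResourceTypes.from_string('sco')        # service + container + object
    permissions = AccountSasPermissions.from_string('rwdl')  # read, write, delete, list
    services = Services.from_string('bq')                    # blob + queue

    assert (str(resource_types), str(permissions), str(services)) == ('sco', 'rwdl', 'bq')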
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
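The status-code handling in ``is_retry`` above reduces to a few rules; a compact standalone restatement (the helper name is made up) is::

    def should_retry(status, used_secondary=False):
        # 3xx/4xx are generally final, except a 404 read from the secondary
        # endpoint (data may not have replicated yet) and 408 timeouts.
        if 300 <= status < 500:
            return (status == 404 and used_secondary) or status == 408
        # 5xx is retryable except 501 Not Implemented and 505 Version Not Supported.
        if status >= 500:
            return status not in (501, 505)
        return False

    assert should_retry(500) and should_retry(408) and should_retry(404, used_secondary=True)
    assert not should_retry(501) and not should_retry(404) and not should_retry(200)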
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
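The SAS-signature scrubbing that ``StorageLoggingPolicy`` applies to a copy-source header before logging can be reproduced with the same ``urllib.parse`` calls; the sample URL below is fabricated::

    from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

    def scrub_sas_signature(url):
        scheme, netloc, path, params, query, fragment = urlparse(url)
        parsed_qs = dict(parse_qsl(query))
        if 'sig' in parsed_qs:
            parsed_qs['sig'] = '*****'   # never log the signed signature
        return urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))

    print(scrub_sas_signature('https://account.blob.core.windows.net/c/b?sv=2020-06-12&sig=secret'))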
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
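A minimal walk-through of the counter bookkeeping shared by ``configure_retries``, ``increment`` and ``is_exhausted`` above (a plain dict stands in for the real settings)::

    settings = {'total': 3, 'connect': 3, 'read': 3, 'status': 3}

    def exhausted(s):
        counts = [c for c in (s['total'], s['connect'], s['read'], s['status']) if c]
        return bool(counts) and min(counts) < 0

    for _ in range(4):            # four consecutive connection failures
        settings['total'] -= 1    # every failure consumes the overall budget...
        settings['connect'] -= 1  # ...and the budget for its own failure class

    assert exhausted(settings)    # the connect budget went negative, so stop retrying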
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
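The exponential back-off documented above (15 s for the first retry, then 15 + 3^1 and 15 + 3^2 with the defaults, each jittered by +/- 3 s) can be sanity-checked in a few lines::

    import random

    initial_backoff, increment_base, jitter = 15, 3, 3

    def backoff_for(count):
        base = initial_backoff + (0 if count == 0 else increment_base ** count)
        low = base - jitter if base > jitter else 0
        return random.Random().uniform(low, base + jitter)

    # Centres on 15, 18 and 24 seconds for the first three retries.
    print([round(backoff_for(count), 1) for count in range(3)])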
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/request_handlers.py deleted file mode 100644 index 37354d7..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/request_handlers.py +++ /dev/null @@ -1,273 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
:
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/response_handlers.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/response_handlers.py deleted file mode 100644 index 4b591dd..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/response_handlers.py +++ /dev/null @@ -1,162 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): - # If storage_error is one of the two then it has already been processed and serialized to the specific exception. - if isinstance(storage_error, (PartialBatchErrorException, ClientAuthenticationError)): - raise storage_error - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except 
ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - error.raise_with_traceback() - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. 
The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads.py deleted file mode 100644 index 941a90f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads.py +++ /dev/null @@ -1,603 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
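The add_account_signature and get_token helpers in the shared_access_signature.py hunk above contain the complete account-SAS recipe: each signed field is appended in a fixed order, every field followed by a newline (unset fields still contribute an empty line), the result is HMAC-SHA256 signed with the account key, and the signature is attached as the sig query parameter. A minimal, self-contained sketch of that flow follows; it assumes sign_string behaves like the usual Azure scheme (base64-decode the key, HMAC-SHA256 the UTF-8 string-to-sign, base64-encode the digest), and make_account_sas is a hypothetical helper, not part of the package:

    import base64
    import hashlib
    import hmac
    from urllib.parse import quote

    def sign_string(account_key, string_to_sign):
        # Assumed behaviour of the package-level sign_string import:
        # HMAC-SHA256 keyed with the base64-decoded account key.
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    def make_account_sas(account_name, account_key, permission, services,
                         resource_types, start, expiry, version):
        # Field order mirrors add_account_signature; ip and protocol are left
        # unset here, so they appear as empty lines in the string-to-sign.
        fields = [account_name, permission, services, resource_types,
                  start, expiry, '', '', version]
        string_to_sign = '\n'.join(fields) + '\n'
        query = {
            'sv': version, 'ss': services, 'srt': resource_types,
            'sp': permission, 'st': start, 'se': expiry,
            'sig': sign_string(account_key, string_to_sign),
        }
        return '&'.join('{0}={1}'.format(name, quote(value)) for name, value in query.items())

The returned string is the SAS token a caller would append, after a '?', to the account or service URL.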
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if 
parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
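    # Context for the two lines below (added comment, not in the original module):
    # the Put Block operation requires block IDs to be Base64-encoded and of
    # identical encoded length within one blob, which is why the chunk offset is
    # zero-padded to a fixed 32 digits before encoding. The extra
    # encode_base64/url_quote round-trip appears to be what the TODO above flags
    # as incorrect-but-recorded behaviour.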
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): 
- try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be 
corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. - if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - # This means count < size and what's leftover will be returned in this call. 
- except StopIteration: - self.leftover = b"" - - if count >= size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in 
kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. 
- if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = 
self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared_access_signature.py deleted file mode 100644 index 890ef1b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_shared_access_signature.py +++ /dev/null @@ -1,596 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from ._shared import sign_string, url_quote -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ - QueryStringConstants - -if TYPE_CHECKING: - from datetime import datetime - from ..blob import ( - ResourceTypes, - AccountSasPermissions, - UserDelegationKey, - ContainerSasPermissions, - BlobSasPermissions - ) - - -class BlobQueryStringConstants(object): - SIGNED_TIMESTAMP = 'snapshot' - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key=None, user_delegation_key=None): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: - Instead of an account key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling get_user_delegation_key on any Blob service object. 
- ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - self.user_delegation_key = user_delegation_key - - def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, - expiry=None, start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the blob or one of its snapshots. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to grant permission. - :param BlobSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - resource_path = container_name + '/' + blob_name - - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - - resource = 'bs' if snapshot else 'b' - resource = 'bv' if version_id else resource - resource = 'd' if kwargs.pop("is_directory", None) else resource - sas.add_resource(resource) - - sas.add_timestamp(snapshot or version_id) - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, resource_path, - user_delegation_key=self.user_delegation_key) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. 
- :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, container_name, - user_delegation_key=self.user_delegation_key) - return sas.get_token() - - -class _BlobSharedAccessHelper(_SharedAccessHelper): - - def add_timestamp(self, timestamp): - self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) - - def add_info_for_hns_account(self, **kwargs): - self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) - self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) - - def get_value_to_append(self, query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): - # pylint: disable = no-member - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/blob/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource) - - if user_delegation_key is not None: - self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) - self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) - self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) - self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) - self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) - self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_TID) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) - else: - string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + - self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + - self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + - self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key if user_delegation_key is None else user_delegation_key.value, - string_to_sign)) - - def get_token(self): - # a conscious decision was made to exclude the timestamp in the generated token - # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp - exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] - return '&'.join(['{0}={1}'.format(n, url_quote(v)) - for n, v in self.query_dict.items() if v is not None and n not in exclude]) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the blob service. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. 
- - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.blob.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_sas_token] - :end-before: [END create_sas_token] - :language: python - :dedent: 8 - :caption: Generating a shared access signature. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(blob=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_container_sas( - account_name, # type: str - container_name, # type: str - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[ContainerSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a container. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. 
- :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 12 - :caption: Generating a sas token. - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_container( - container_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_blob_sas( - account_name, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[BlobSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a blob. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str blob_name: - The name of the blob. - :param str snapshot: - An optional blob snapshot ID. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.BlobSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. 
- :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str version_id: - An optional blob version ID. This parameter is only for versioning enabled account - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - version_id = kwargs.pop('version_id', None) - if version_id and snapshot: - raise ValueError("snapshot and version_id cannot be set at the same time.") - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_blob( - container_name, - blob_name, - snapshot=snapshot, - version_id=version_id, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_06_12/_upload_helpers.py deleted file mode 100644 index 94313f6..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_upload_helpers.py +++ /dev/null @@ -1,295 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
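The ``generate_*_sas`` helpers deleted above mirror the public ``azure-storage-blob`` API surface. A minimal usage sketch, assuming one of the retained API versions (``v2019_07_07`` here, purely illustrative) re-exports the same names, and using placeholder account values::

    from datetime import datetime, timedelta

    # Illustrative import path; any retained blob API version exposes these names.
    from azure.multiapi.storagev2.blob.v2019_07_07 import (
        BlobSasPermissions,
        generate_blob_sas,
    )

    # Placeholder account details; real values come from configuration.
    sas_token = generate_blob_sas(
        account_name="myaccount",
        container_name="mycontainer",
        blob_name="myblob",
        account_key="<account-key>",
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # Append the token to the blob URL to hand out time-limited read access.
    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob?" + sas_token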
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceExistsError, ResourceModifiedError, HttpResponseError - -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from ._shared.models import StorageErrorCode -from ._shared.uploads import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from ._shared.encryption import generate_blob_encryption_data, encrypt_blob -from ._generated.models import ( - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -def _convert_mod_error(error): - message = error.message.replace( - "The condition specified using HTTP conditional header(s) is not met.", - "The specified blob already exists.") - message = message.replace("ConditionNotMet", "BlobAlreadyExists") - overwrite_error = ResourceExistsError( - message=message, - response=error.response, - error=error) - overwrite_error.error_code = StorageErrorCode.blob_already_exists - raise overwrite_error - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - # Do single put if the size is smaller than or equal config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return client.upload( - body=data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or 
encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs - ) - else: - block_ids = upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - headers=headers, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs) - - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_version.py b/azure/multiapi/storagev2/blob/v2020_06_12/_version.py deleted file mode 100644 index a30b2ba..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/_version.py +++ /dev/null @@ -1,7 
+0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.8.1" diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/__init__.py deleted file mode 100644 index 33c1031..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os - -from .._models import BlobType -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._blob_client_async import BlobClient -from ._container_client_async import ContainerClient -from ._blob_service_client_async import BlobServiceClient -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -async def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -async def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = await client.download_blob(**kwargs) - await stream.readinto(handle) - - -async def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - await _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - await _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_client_async.py deleted file mode 100644 index a87a409..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_client_async.py +++ /dev/null @@ -1,2483 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method -from functools import partial -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -from azure.core.pipeline import AsyncPipeline - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import get_page_ranges_result, parse_tags, deserialize_pipeline_response_into_cls -from .._serialize import get_modify_conditions, get_api_version, get_access_conditions -from .._generated.aio import AzureBlobStorage -from .._generated.models import CpkInfo -from .._deserialize import deserialize_blob_properties -from .._blob_client import BlobClient as BlobClientBase -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from .._models import BlobType, BlobBlock, BlobProperties -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - - -class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. 
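The async convenience helpers removed here (``upload_blob_to_url`` / ``download_blob_from_url``) have the same shape in every retained API version. A minimal sketch, assuming the ``v2019_07_07`` aio package (illustrative) and placeholder URL/credential values::

    import asyncio

    # Illustrative import path; each retained blob API version re-exports these helpers.
    from azure.multiapi.storagev2.blob.v2019_07_07.aio import (
        download_blob_from_url,
        upload_blob_to_url,
    )

    async def round_trip(blob_url, credential):
        # Upload a small payload as a block blob, then download it back to a local file.
        await upload_blob_to_url(blob_url, b"hello world", credential=credential, overwrite=True)
        await download_blob_from_url(blob_url, "copy.bin", credential=credential, overwrite=True)

    # The blob URL and SAS-token credential below are placeholders.
    asyncio.run(round_trip("https://myaccount.blob.core.windows.net/demo/blob.bin", "<sas-token>"))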
This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobClient, self).__init__( - account_url, - container_name=container_name, - blob_name=blob_name, - snapshot=snapshot, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_account_information(self, **kwargs): # type: ignore - # type: (Optional[int]) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Dict[str, Any] - """ - Creates a new Block Blob where the content of the blob is read from a given URL. - The content of an existing blob is overwritten with the new blob. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. - :keyword bool include_source_blob_properties: - Indicates if properties from the source blob should be copied. Defaults to True. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :paramtype tags: dict(str, str) - :keyword bytearray source_content_md5: - Specify the md5 that is used to verify the integrity of the source bytes. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. 
- """ - options = self._upload_blob_from_url_options( - source_url=self._encode_source_url(source_url), - **kwargs) - try: - return await self._client.block_blob.put_blob_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob( - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. - Required if the blob has an active lease. - :paramtype: ~azure.storage.blob.aio.BlobLeaseClient - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 16 - :caption: Upload a blob to the container. 
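        In addition to the sample above, a minimal illustrative sketch of a call to
        ``upload_blob`` follows. It is a hedged example, not part of this repository:
        the account URL, container name, blob name and credential are placeholder
        assumptions, and the upstream ``azure.storage.blob.aio`` import path is used
        (the corresponding vendored multi-API module path would differ)::

            import asyncio
            from azure.storage.blob.aio import BlobClient

            async def main():
                # Placeholder connection details; substitute real values.
                blob = BlobClient(
                    account_url="https://<account>.blob.core.windows.net",
                    container_name="<container>",
                    blob_name="<blob>",
                    credential="<sas-token-or-account-key>",
                )
                async with blob:
                    # overwrite=True replaces any existing blob content.
                    await blob.upload_blob(b"hello, world", overwrite=True)

            asyncio.run(main())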
- """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return await upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return await upload_page_blob(**options) - return await upload_append_blob(**options) - - @distributed_trace_async - async def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 16 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - downloader = StorageStreamDownloader(**options) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_blob(self, delete_snapshots=None, **kwargs): - # type: (str, Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 16 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - await self._client.blob.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_blob(self, **kwargs): - # type: (Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 12 - :caption: Undeleting a blob. - """ - try: - await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :kwarg str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - await self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - # Encrypted with CPK - except ResourceExistsError: - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def get_blob_properties(self, **kwargs): - # type: (Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 12 - :caption: Getting the properties for a blob. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - cls_method = kwargs.pop('cls', None) - if cls_method: - kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) - blob_props = await self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. 
Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return await self._client.blob.set_http_headers(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.set_metadata(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return await self._client.page_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.append_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 12 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.create_snapshot(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, Any) -> Any - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. 
At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. 
- :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Copy a blob from a URL. 
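        In addition to the sample above, a hedged sketch of consuming the returned copy
        properties and aborting a pending copy follows; ``blob`` is assumed to be an
        already-constructed async BlobClient, and the source URL is an illustrative
        placeholder in the form given earlier in this docstring::

            # Start a server-side copy; the returned dict contains copy_id and
            # copy_status (see the :returns: description above).
            copy_props = await blob.start_copy_from_url(
                "https://otheraccount.blob.core.windows.net/mycontainer/myblob?<sastoken>"
            )
            if copy_props["copy_status"] == "pending":
                # Abort using the copy_id from the returned properties.
                await blob.abort_copy(copy_props["copy_id"])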
- """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return await self._client.page_blob.copy_incremental(**options) - return await self._client.blob.start_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - await self._client.blob.abort_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. 
- :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=standard_blob_tier, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return await self._client.block_blob.stage_block(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block_from_url( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - options = self._stage_block_from_url_options( - block_id, - source_url=self._encode_source_url(source_url), - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return await self._client.block_blob.stage_block_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = await self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - @distributed_trace_async - async def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of BlobBlock objects. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.block_blob.commit_block_list(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. 
The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTiermust be specified") - try: - await self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
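A minimal sketch of the staged-upload flow that the Stage Block / Get Block List / Commit Block List operations above describe. It assumes the equivalent public `azure.storage.blob.aio` client surface (the vendored copies in this package expose the same methods); the connection string, container, and blob names are placeholders.

```python
import asyncio
import uuid

from azure.storage.blob import BlobBlock
from azure.storage.blob.aio import BlobClient


async def chunked_upload(conn_str: str, data: bytes, chunk_size: int = 4 * 1024 * 1024) -> None:
    # Placeholder container/blob names; adjust for a real account.
    blob = BlobClient.from_connection_string(conn_str, container_name="demo", blob_name="chunked.bin")
    async with blob:
        block_ids = []
        for start in range(0, len(data), chunk_size):
            block_id = uuid.uuid4().hex  # any unique, uniformly sized id works
            await blob.stage_block(block_id, data[start:start + chunk_size])
            block_ids.append(block_id)

        # Staged blocks show up in the uncommitted list until they are committed.
        committed, uncommitted = await blob.get_block_list("all")
        print(f"{len(uncommitted)} staged, {len(committed)} committed")

        # Put Block List turns the staged blocks into the blob's content.
        await blob.commit_block_list([BlobBlock(block_id=b) for b in block_ids])


# asyncio.run(chunked_upload("<connection-string>", b"x" * (10 * 1024 * 1024)))
```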
- :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return await self._client.blob.set_tags(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = await self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
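A short sketch of the Set Tags / Get Tags pair documented above, reusing a blob client created as in the previous example (API version 2019-12-12 or later; tag names are placeholders).

```python
async def tag_blob(blob) -> dict:
    # Replaces the entire tag set on the blob (at most 10 tags, case-sensitive keys).
    await blob.set_blob_tags({"project": "multiapi", "tier": "hot"})

    # Reads the tags back as a plain key/value dict.
    tags = await blob.get_blob_tags()

    # Other operations can then be guarded on the tags, e.g.:
    # await blob.get_block_list(if_tags_match_condition="\"project\"='multiapi'")
    return tags
```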
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = await self._client.page_blob.get_page_ranges(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def set_sequence_number( # type: ignore - self, sequence_number_action, # type: Union[str, SequenceNumberAction] - sequence_number=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
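For the Get Page Ranges operations above, a minimal sketch (placeholder names, page blob assumed to exist) showing how the filled/cleared tuples come back.

```python
async def show_page_ranges(page_blob) -> None:
    # Lists valid ranges for the whole blob; each range is a dict with 'start' and 'end'.
    filled, cleared = await page_blob.get_page_ranges()
    for r in filled:
        print(f"filled  {r['start']}-{r['end']}")
    for r in cleared:
        print(f"cleared {r['start']}-{r['end']}")

    # Diff against an earlier snapshot: only ranges changed since that snapshot are returned.
    changed, cleared_since = await page_blob.get_page_ranges(
        previous_snapshot_diff="<snapshot-datetime>")
```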
- :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return await self._client.page_blob.update_sequence_number(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_blob(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return await self._client.page_blob.resize(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. 
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
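The page-blob maintenance calls above (resize, sequence number, page writes) compose as in the following sketch; sizes are placeholders and must stay 512-byte aligned.

```python
async def page_blob_maintenance(page_blob) -> None:
    # Page blob size must be a multiple of 512 bytes.
    await page_blob.resize_blob(4 * 1024 * 1024)

    # Write a single 512-byte page at offset 0.
    await page_blob.upload_page(b"\x00" * 512, offset=0, length=512)

    # Bump the user-controlled sequence number used for optimistic concurrency.
    await page_blob.set_sequence_number("increment")
```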
- :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return await self._client.page_blob.upload_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def clear_page(self, offset, length, **kwargs): - # type: (int, int, Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. 
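The Upload Pages From URL operation above performs the copy server-side; a sketch with a placeholder source SAS URL and 512-byte-aligned ranges.

```python
async def copy_pages(page_blob, source_sas_url: str) -> None:
    # Copies the first 1 MiB of the source into the same range of this page blob,
    # without the data passing through the client.
    await page_blob.upload_pages_from_url(
        source_url=source_sas_url,
        offset=0,
        length=1024 * 1024,
        source_offset=0,
    )
```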
- :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return await self._client.page_blob.clear_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. 
If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return await self._client.append_blob.append_block(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async() - async def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. 
- :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return await self._client.append_blob.append_block_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async() - async def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
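The append-blob calls above line up as follows; a minimal sketch with placeholder names (sealing requires API version 2019-12-12 or later).

```python
async def append_and_seal(append_blob, source_sas_url: str) -> None:
    # Appends local bytes to the end of the blob; the returned dict includes the
    # new append offset and committed block count alongside the ETag.
    result = await append_blob.append_block(b"first entry\n")
    print(result)

    # Appends a range read server-side from a source blob via its SAS URL.
    await append_blob.append_block_from_url(source_sas_url, source_offset=0, source_length=512)

    # Seals the append blob so that no further blocks can be added.
    await append_blob.seal_append_blob()
```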
- :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return await self._client.append_blob.seal(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _get_container_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> ContainerClient - """Get a client to interact with the blob's parent container. - - The container need not already exist. Defaults to current blob's credentials. - - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_client_from_blob_client] - :end-before: [END get_container_client_from_blob_client] - :language: python - :dedent: 12 - :caption: Get container client from blob object. - """ - from ._container_client_async import ContainerClient - if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - _pipeline=_pipeline, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_service_client_async.py deleted file mode 100644 index d3d72ba..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_service_client_async.py +++ /dev/null @@ -1,678 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged - -from .._shared.models import LocationMode -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._shared.parser import _to_utc_datetime -from .._shared.response_handlers import parse_to_internal_user_delegation_key -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageServiceProperties, KeyInfo -from .._blob_service_client import BlobServiceClient as BlobServiceClientBase -from ._container_client_async import ContainerClient -from ._blob_client_async import BlobClient -from .._models import ContainerProperties -from .._deserialize import service_stats_deserialize, service_properties_deserialize -from .._serialize import get_api_version -from ._models import ContainerPropertiesPaged, FilteredBlobPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey - from ._lease_async import BlobLeaseClient - from .._models import ( - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. 
- :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. 
- :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 12 - :caption: Getting account information for the blob service. - """ - try: - return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_stats(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 12 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
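The account-level calls above (user delegation key, account information, replication stats) can be exercised together. This sketch assumes an Azure AD token credential via `azure.identity` (not part of this package) and a placeholder account URL.

```python
from datetime import datetime, timedelta, timezone

from azure.identity.aio import DefaultAzureCredential
from azure.storage.blob.aio import BlobServiceClient


async def inspect_account(account_url: str) -> None:
    credential = DefaultAzureCredential()
    async with BlobServiceClient(account_url, credential=credential) as service:
        info = await service.get_account_information()
        print(info["sku_name"], info["account_kind"])

        # Requires a token credential; the key can later sign user-delegation SAS tokens.
        now = datetime.now(timezone.utc)
        delegation_key = await service.get_user_delegation_key(now, now + timedelta(hours=1))

        # Only meaningful when read-access geo-redundant replication (RA-GRS) is enabled.
        stats = await service.get_service_stats()
        print(stats)
    await credential.close()
```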
- :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 12 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 12 - :caption: Setting service properties for the blob service. 
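Because set_service_properties preserves any element passed as None, a targeted update only needs the pieces being changed; a sketch with placeholder retention and static-website settings, reusing a service client as above.

```python
from azure.storage.blob import RetentionPolicy, StaticWebsite


async def enable_soft_delete(service) -> None:
    # Leaves logging, metrics, and CORS untouched (None means "keep existing").
    await service.set_service_properties(
        delete_retention_policy=RetentionPolicy(enabled=True, days=7),
        static_website=StaticWebsite(enabled=True, index_document="index.html"),
    )
```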
- """ - if all(parameter is None for parameter in [ - analytics_logging, hour_metrics, minute_metrics, cors, - target_version, delete_retention_policy, static_website]): - raise ValueError("set_service_properties should be called with at least one parameter") - - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> AsyncItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 16 - :caption: Listing the containers in the blob service. - """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. 
"@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace_async - async def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 16 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - timeout = kwargs.pop('timeout', None) - kwargs.setdefault('merge_span', True) - await container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace_async - async def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 16 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def _rename_container(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str name: - The name of the container to rename. - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - renamed_container = self.get_container_client(new_name) - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - await renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword str new_name: - The new name for the deleted container to be restored to. - If not specified deleted_container_name will be used as the restored container name. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.aio.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - container = self.get_container_client(new_name or deleted_container_name) - try: - await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except HttpResponseError as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 12 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by - :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 16 - :caption: Getting the blob client to interact with a specific blob. 
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_container_client_async.py deleted file mode 100644 index 93cc877..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_container_client_async.py +++ /dev/null @@ -1,1210 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.pipeline.transport import AsyncHttpResponse - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from .._generated.aio import AzureBlobStorage -from .._generated.models import SignedIdentifier -from .._deserialize import deserialize_container_properties -from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name -from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import -from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix -from ._lease_async import BlobLeaseClient -from ._blob_client_async import BlobClient - -if TYPE_CHECKING: - from .._models import PublicAccess - from ._download_async import StorageStreamDownloader - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - AccessPolicy, - StandardBlobTier, - PremiumPageBlobTier) - - -class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): - """A client to interact with a specific container, although that container - may not yet exist. 
- - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 12 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(ContainerClient, self).__init__( - account_url, - container_name=container_name, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - @distributed_trace_async - async def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 16 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - timeout = kwargs.pop('timeout', None) - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return await self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def _rename_container(self, new_name, **kwargs): - # type: (str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.ContainerClient - """ - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container = ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 16 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - await self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. 
- - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_properties(self, **kwargs): - # type: (**Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. 
- :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a container exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - await self._client.container.get_properties(**kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. 
- """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> BlobServiceClient - """Get a client to interact with the container's parent service account. - - Defaults to current container's credentials. - - :returns: A BlobServiceClient. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_client_from_container_client] - :end-before: [END get_blob_service_client_from_container_client] - :language: python - :dedent: 8 - :caption: Get blob service client from container object. - """ - from ._blob_service_client_async import BlobServiceClient - if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return BlobServiceClient( - "{}://{}".format(self.scheme, self.primary_hostname), - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, - _pipeline=_pipeline) - - - @distributed_trace_async - async def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 16 - :caption: Getting the access policy on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs # type: Any - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 16 - :caption: Setting access policy on the container. - """ - timeout = kwargs.pop('timeout', None) - lease = kwargs.pop('lease', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - try: - return await self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 12 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged - ) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
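# A short sketch contrasting flat and hierarchical listing with the list_blobs / walk_blobs
# generators described above. Container URL and credential are placeholders.
import asyncio

from azure.storage.blob.aio import ContainerClient


async def listing_walkthrough():
    container = ContainerClient(
        "https://<my-account>.blob.core.windows.net",  # placeholder
        container_name="demo",
        credential="<account-key>")                    # placeholder

    async with container:
        # Flat listing: every blob whose name starts with the prefix, with metadata included.
        async for blob in container.list_blobs(name_starts_with="logs/2024/", include=["metadata"]):
            print(blob.name, blob.size)

        # Hierarchical listing: one level at a time, '/' acting as a directory separator.
        async for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
            # Items are BlobProperties or BlobPrefix (a virtual-directory placeholder).
            print(item.name)


asyncio.run(listing_walkthrough())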
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace_async - async def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 12 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - await blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace_async - async def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a Lease object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await blob.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object. (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return await blob_client.download_blob( - offset=offset, - length=length, - **kwargs) - - @distributed_trace_async - async def delete_blobs( # pylint: disable=arguments-differ - self, *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
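# A sketch of the blob data-path helpers documented above: upload_blob, download_blob,
# delete_blob, and the batched delete_blobs. Names and credentials are placeholders.
import asyncio

from azure.storage.blob.aio import ContainerClient


async def data_path_walkthrough():
    container = ContainerClient(
        "https://<my-account>.blob.core.windows.net",  # placeholder
        container_name="demo",
        credential="<account-key>")                    # placeholder

    async with container:
        # Upload with automatic chunking; overwrite=True replaces any existing blob.
        blob_client = await container.upload_blob("hello.txt", b"hello world", overwrite=True)
        print(blob_client.url)

        # download_blob returns a StorageStreamDownloader; readall() buffers the whole content.
        downloader = await container.download_blob("hello.txt")
        data = await downloader.readall()
        assert data == b"hello world"

        # Single delete (snapshots included), then a batched delete of several blobs.
        await container.delete_blob("hello.txt", delete_snapshots="include")
        responses = await container.delete_blobs("a.txt", "b.txt", raise_on_any_failure=False)
        async for response in responses:
            print(response.status_code)


asyncio.run(data_path_walkthrough())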
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 12 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_standard_blob_tier_blobs( - self, - standard_blob_tier: Union[str, 'StandardBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set on all blobs to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. 
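(Illustrative only.) A sketch of a batch tier change using the options above; the tier string and blob names are placeholders:

.. code-block:: python

    # Sketch: move several block blobs to the Cool tier in a single batch.
    responses = await container_client.set_standard_blob_tier_blobs(
        "Cool",                                        # applied to every blob below
        "archive/2019-q1.parquet",
        {"name": "archive/2019-q2.parquet", "timeout": 30},
        raise_on_any_failure=False,
    )
    async for response in responses:
        print(response.status_code)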
- :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[BlobProperties, str] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 12 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_download_async.py deleted file mode 100644 index 1f05309..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_download_async.py +++ /dev/null @@ -1,549 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
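(Illustrative only.) A sketch of ``get_blob_client`` followed by a download; the blob name and snapshot ID are placeholders:

.. code-block:: python

    # Sketch: address one blob (optionally a snapshot of it) from the container client.
    blob_client = container_client.get_blob_client(
        "reports/daily.csv",
        snapshot="2021-08-01T00:00:00.0000000Z",   # placeholder snapshot ID; omit for the live blob
    )
    downloader = await blob_client.download_blob()
    data = await downloader.readall()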
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings -from typing import AsyncIterator - -from aiohttp import ClientPayloadError -from azure.core.exceptions import HttpResponseError, ServiceResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._deserialize import get_page_ranges_result -from .._download import process_range_and_offset, _ChunkDownloader - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - content = data.response.body() - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - retry_active = True - retry_total = 3 - while retry_active: - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - retry_active = False - - except HttpResponseError as error: - process_storage_error(error) - except ClientPayloadError as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - await asyncio.sleep(1) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = response.properties.etag - - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += await self._iter_downloader.yield_chunk(chunk) - except StopIteration: - self._complete = True - # it's likely that there some data left in self._current_content - if self._current_content: - return self._current_content - raise StopAsyncIteration("Download complete") - - return self._get_chunk_data() - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the blob. 
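(Illustrative only.) The chunk download above retries transient payload errors a fixed number of times, sleeping between attempts, before surfacing the failure. A standalone sketch of that bounded-retry loop, with a hypothetical ``fetch_chunk`` coroutine and ``ConnectionError`` standing in for the transport error:

.. code-block:: python

    import asyncio

    async def fetch_with_retries(fetch_chunk, attempts=3, backoff=1.0):
        # Retry only transient transport failures; re-raise once attempts run out.
        while True:
            try:
                return await fetch_chunk()
            except ConnectionError:
                attempts -= 1
                if attempts <= 0:
                    raise
                await asyncio.sleep(backoff)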
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - retry_active = True - retry_total = 3 - while retry_active: - try: - location_mode, response = await self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - retry_active = False - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - retry_active = False - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - except ClientPayloadError as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - await asyncio.sleep(1) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = await self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - # Lock on the etag. This can be overriden by the user by specifying '*' - if self._request_options.get('modified_access_conditions'): - if not self._request_options['modified_access_conditions'].if_match: - self._request_options['modified_access_conditions'].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob_in_chunk] - :end-before: [END download_a_blob_in_chunk] - :language: python - :dedent: 16 - :caption: Download a blob using chunks(). 
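(Illustrative only.) A sketch of consuming the stream chunk by chunk instead of buffering the whole blob; ``blob_client`` is an existing aio ``BlobClient`` and ``handle_chunk`` is a placeholder:

.. code-block:: python

    # Sketch: process a large blob incrementally.
    downloader = await blob_client.download_blob()
    async for chunk in downloader.chunks():
        handle_chunk(chunk)   # placeholder for your own per-chunk handling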
- """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - async def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :param int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." 
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - for task in done: - task.result() - except HttpResponseError as error: - process_storage_error(error) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - done, _running_futures = await asyncio.wait(running_futures) - try: - for task in done: - task.result() - except HttpResponseError as error: - process_storage_error(error) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :param int max_concurrency: - The number of parallel connections with which to download. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_lease_async.py deleted file mode 100644 index 79e6733..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_lease_async.py +++ /dev/null @@ -1,325 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
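(Illustrative only.) ``readinto`` above keeps at most ``max_concurrency`` chunk downloads in flight by waiting for the first completion and then topping the set back up. A standalone sketch of that pattern with a hypothetical ``work`` coroutine:

.. code-block:: python

    import asyncio
    from itertools import islice

    async def run_bounded(work, items, max_concurrency=4):
        items = iter(items)
        running = {asyncio.ensure_future(work(i)) for i in islice(items, max_concurrency)}
        while running:
            done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
            for task in done:
                task.result()                       # surface any failure immediately
            for i in islice(items, len(done)):      # refill what just finished
                running.add(asyncio.ensure_future(work(i)))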
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._serialize import get_modify_conditions -from .._lease import BlobLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - from .._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(LeaseClientBase): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.aio.BlobClient or - ~azure.storage.blob.aio.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. 
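(Illustrative only.) A hedged sketch of the lease lifecycle described above; the import path matches the upstream ``azure-storage-blob`` package and ``blob_client`` is an existing aio ``BlobClient``:

.. code-block:: python

    from azure.storage.blob.aio import BlobLeaseClient   # path in the upstream package

    # Sketch: hold an infinite lease while working on the blob, then release it.
    lease = BlobLeaseClient(blob_client)
    await lease.acquire(lease_duration=-1)   # -1 = never expires until released or broken
    try:
        await lease.renew()                  # resets the lease duration clock
    finally:
        await lease.release()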
Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
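(Illustrative only.) A short sketch of breaking a held lease, using the ``lease`` object from the previous sketch and the return value described above:

.. code-block:: python

    # Sketch: force-break the lease and report when it can be re-acquired.
    seconds_remaining = await lease.break_lease(lease_break_period=10)
    print(f"a new lease can be acquired in about {seconds_remaining} seconds")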
- :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_list_blobs_helper.py deleted file mode 100644 index 058572f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_list_blobs_helper.py +++ /dev/null @@ -1,163 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from .._deserialize import get_blob_properties_from_generated_code -from .._models import BlobProperties -from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The container that the blobs are listed from. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefix(AsyncItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token of the current page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - async def _extract_data_cb(self, get_next_return): - continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_models.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_models.py deleted file mode 100644 index 05edd78..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_models.py +++ /dev/null @@ -1,143 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator -from azure.core.exceptions import HttpResponseError -from .._deserialize import parse_tags - -from .._models import ContainerProperties, FilteredBlob -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - -from .._generated.models import FilterBlobItem - - -class ContainerPropertiesPaged(AsyncPageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class FilteredBlobPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - tags = parse_tags(item.tags) - blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_upload_helpers.py deleted file mode 100644 index 36d1e44..0000000 --- a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_upload_helpers.py +++ /dev/null @@ -1,270 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceModifiedError, HttpResponseError - -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from .._shared.uploads_async import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from .._shared.encryption import generate_blob_encryption_data, encrypt_blob -from .._generated.models import ( - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) -from .._upload_helpers import _convert_mod_error, _any_conditions - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - - -async def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return await client.upload( - body=data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = await upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs - ) - else: - block_ids = await upload_substream_blocks( - 
service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - headers=headers, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return await client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - **kwargs) - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. " - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = await client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return await upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs) - - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - 
chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/__init__.py deleted file mode 100644 index 58442ed..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/__init__.py +++ /dev/null @@ -1,239 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import os - -from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import -from ._version import VERSION -from ._blob_client import BlobClient -from ._container_client import ContainerClient -from ._blob_service_client import BlobServiceClient -from ._lease import BlobLeaseClient -from ._download import StorageStreamDownloader -from ._quick_query_helper import BlobQueryReader -from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.response_handlers import PartialBatchErrorException -from ._shared.models import( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode, - UserDelegationKey -) -from ._generated.models import ( - RehydratePriority, -) -from ._models import ( - BlobType, - BlockState, - StandardBlobTier, - PremiumPageBlobTier, - BlobImmutabilityPolicyMode, - SequenceNumberAction, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - ContainerProperties, - BlobProperties, - FilteredBlob, - LeaseProperties, - ContentSettings, - CopyProperties, - BlobBlock, - PageRange, - AccessPolicy, - ContainerSasPermissions, - BlobSasPermissions, - CustomerProvidedEncryptionKey, - ContainerEncryptionScope, - BlobQueryError, - DelimitedJsonDialect, - DelimitedTextDialect, - QuickQueryDialect, - ArrowDialect, - ArrowType, - ObjectReplicationPolicy, - ObjectReplicationRule, - ImmutabilityPolicy -) -from ._list_blobs_helper import BlobPrefix - -__version__ = VERSION - - -def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> Dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. 
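The async upload helpers removed in the hunk above choose between a single Put Blob call (when the payload fits within max_single_put_size) and a chunked Put Block / Put Block List path; callers never invoke them directly but reach them through the async BlobClient. A minimal sketch, assuming the upstream azure.storage.blob.aio package (which the removed multiapi copy mirrors) and placeholder account, container, and credential values::

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        # Placeholder names and credential; use a real SAS token or account key.
        async with BlobClient(
            account_url="https://myaccount.blob.core.windows.net",
            container_name="mycontainer",
            blob_name="large-file.bin",
            credential="<account-key-or-sas>",
            max_single_put_size=8 * 1024 * 1024,   # single Put Blob below this size
            max_block_size=4 * 1024 * 1024,        # chunk size for staged blocks
        ) as blob:
            with open("large-file.bin", "rb") as data:
                # upload_blob dispatches to upload_block_blob, which picks the
                # single-put or chunked path based on the settings above.
                await blob.upload_blob(data, overwrite=True, max_concurrency=4)

    asyncio.run(main())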
- - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = client.download_blob(**kwargs) - stream.readinto(handle) - - -def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream. - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. 
- :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobType', - 'BlobLeaseClient', - 'StorageErrorCode', - 'UserDelegationKey', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'BlockState', - 'StandardBlobTier', - 'PremiumPageBlobTier', - 'SequenceNumberAction', - 'BlobImmutabilityPolicyMode', - 'ImmutabilityPolicy', - 'PublicAccess', - 'BlobAnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'ContainerProperties', - 'BlobProperties', - 'BlobPrefix', - 'FilteredBlob', - 'LeaseProperties', - 'ContentSettings', - 'CopyProperties', - 'BlobBlock', - 'PageRange', - 'AccessPolicy', - 'QuickQueryDialect', - 'ContainerSasPermissions', - 'BlobSasPermissions', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageStreamDownloader', - 'CustomerProvidedEncryptionKey', - 'RehydratePriority', - 'generate_account_sas', - 'generate_container_sas', - 'generate_blob_sas', - 'PartialBatchErrorException', - 'ContainerEncryptionScope', - 'BlobQueryError', - 'DelimitedJsonDialect', - 'DelimitedTextDialect', - 'ArrowDialect', - 'ArrowType', - 'BlobQueryReader', - 'ObjectReplicationPolicy', - 'ObjectReplicationRule' -] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_blob_client.py b/azure/multiapi/storagev2/blob/v2020_10_02/_blob_client.py deleted file mode 100644 index 902c013..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_blob_client.py +++ /dev/null @@ -1,3977 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
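The two module-level convenience helpers removed above wrap BlobClient for one-shot transfers. A short usage sketch, assuming the upstream azure.storage.blob package (which this multiapi copy mirrors) and a placeholder SAS URL::

    from azure.storage.blob import upload_blob_to_url, download_blob_from_url

    # Placeholder URL; a real call needs a valid SAS token or a credential argument.
    sas_url = "https://myaccount.blob.core.windows.net/mycontainer/report.csv?<sas-token>"

    # One-shot upload of local bytes as a block blob.
    with open("report.csv", "rb") as data:
        upload_blob_to_url(sas_url, data, overwrite=True)

    # One-shot download to a local path; raises ValueError if the file
    # already exists unless overwrite=True is passed.
    download_blob_from_url(sas_url, "downloaded_report.csv", overwrite=True)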
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines,no-self-use -from functools import partial -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError - -from ._shared import encode_base64 -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query, TransportWrapper -from ._shared.encryption import generate_blob_encryption_data -from ._shared.uploads import IterStreamer -from ._shared.request_handlers import ( - add_metadata_headers, get_length, read_length, - validate_and_format_range_headers) -from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized -from ._generated import AzureBlobStorage -from ._generated.models import ( # pylint: disable=unused-import - DeleteSnapshotsOptionType, - BlobHTTPHeaders, - BlockLookupList, - AppendPositionAccessConditions, - SequenceNumberAccessConditions, - QueryRequest, - CpkInfo) -from ._serialize import ( - get_modify_conditions, - get_source_conditions, - get_cpk_scope_info, - get_api_version, - serialize_blob_tags_header, - serialize_blob_tags, - serialize_query_format, get_access_conditions -) -from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags, \ - deserialize_pipeline_response_into_cls -from ._quick_query_helper import BlobQueryReader -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob, _any_conditions) -from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError, QuickQueryDialect, \ - DelimitedJsonDialect, DelimitedTextDialect -from ._download import StorageStreamDownloader -from ._lease import BlobLeaseClient - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.models import BlockList - from ._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. 
This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - if not (container_name and blob_name): - raise ValueError("Please specify a container name and blob name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - path_snapshot, sas_token = parse_query(parsed_url.query) - - self.container_name = container_name - self.blob_name = blob_name - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - # This parameter is used for the hierarchy traversal. 
Give precedence to credential. - self._raw_credential = credential if credential else sas_token - self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) - super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - quote(self.blob_name, safe='~/'), - self._query_str) - - def _encode_source_url(self, source_url): - parsed_source_url = urlparse(source_url) - source_scheme = parsed_source_url.scheme - source_hostname = parsed_source_url.netloc.rstrip('/') - source_path = unquote(parsed_source_url.path) - source_query = parsed_source_url.query - result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))] - if source_query: - result.append(source_query) - return '?'.join(result) - - @classmethod - def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): - # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient - """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name. - - :param str blob_url: - The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type blob_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. If specified, this will override - the snapshot in the url. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - """ - try: - if not blob_url.lower().startswith('http'): - blob_url = "https://" + blob_url - except AttributeError: - raise ValueError("Blob URL must be a string.") - parsed_url = urlparse(blob_url.rstrip('/')) - - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(blob_url)) - - account_path = "" - if ".core." in parsed_url.netloc: - # .core. is indicating non-customized url. Blob name with directory info can also be parsed. - path_blob = parsed_url.path.lstrip('/').split('/', 1) - elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: - path_blob = parsed_url.path.lstrip('/').split('/', 2) - account_path += '/' + path_blob[0] - else: - # for customized url. blob name that has directory info cannot be parsed. 
- path_blob = parsed_url.path.lstrip('/').split('/') - if len(path_blob) > 2: - account_path = "/" + "/".join(path_blob[:-2]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) - if not container_name or not blob_name: - raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.") - - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=path_snapshot, credential=credential, **kwargs - ) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobClient - """Create BlobClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_blob] - :end-before: [END auth_from_connection_string_blob] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=snapshot, credential=credential, **kwargs - ) - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). 
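The BlobClient removed here can be constructed three ways: from the account URL plus container and blob names, from a full blob URL via from_blob_url, or from a connection string via from_connection_string. A minimal sketch with placeholder names, using the upstream azure.storage.blob package::

    from azure.storage.blob import BlobClient

    # 1. Account URL plus container/blob names (credential may be an account
    #    key, a SAS token string, or a token credential).
    client = BlobClient(
        account_url="https://myaccount.blob.core.windows.net",
        container_name="mycontainer",
        blob_name="myblob.txt",
        credential="<account-key-or-sas>",
    )

    # 2. Full blob URL, optionally carrying its own SAS token.
    client = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/myblob.txt?<sas-token>"
    )

    # 3. Connection string; credential values are parsed out of the string.
    client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob.txt"
    )

    # Account-level metadata (SKU and account kind) is available with any of these.
    info = client.get_account_information()
    print(info["sku_name"], info["account_kind"])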
- :rtype: dict(str, str) - """ - try: - return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_blob_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - encryption_options = { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function, - } - if self.key_encryption_key is not None: - cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) - encryption_options['cek'] = cek - encryption_options['vector'] = iv - encryption_options['data'] = encryption_data - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - overwrite = kwargs.pop('overwrite', False) - max_concurrency = kwargs.pop('max_concurrency', 1) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - kwargs['cpk_info'] = cpk_info - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) - kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) - if content_settings: - kwargs['blob_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['overwrite'] = overwrite - kwargs['headers'] = headers - kwargs['validate_content'] = validate_content - kwargs['blob_settings'] = self._config - kwargs['max_concurrency'] = max_concurrency - kwargs['encryption_options'] = encryption_options - - if blob_type == BlobType.BlockBlob: - kwargs['client'] = self._client.block_blob - kwargs['data'] = data - elif blob_type == BlobType.PageBlob: - kwargs['client'] = self._client.page_blob - elif blob_type == BlobType.AppendBlob: - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - 
kwargs['client'] = self._client.append_blob - else: - raise ValueError("Unsupported BlobType: {}".format(blob_type)) - return kwargs - - def _upload_blob_from_url_options(self, source_url, **kwargs): - # type: (...) -> Dict[str, Any] - tier = kwargs.pop('standard_blob_tier', None) - overwrite = kwargs.pop('overwrite', False) - content_settings = kwargs.pop('content_settings', None) - source_authorization = kwargs.pop('source_authorization', None) - if content_settings: - kwargs['blob_http_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'copy_source_authorization': source_authorization, - 'content_length': 0, - 'copy_source_blob_properties': kwargs.pop('include_source_blob_properties', True), - 'source_content_md5': kwargs.pop('source_content_md5', None), - 'copy_source': source_url, - 'modified_access_conditions': get_modify_conditions(kwargs), - 'blob_tags_string': serialize_blob_tags_header(kwargs.pop('tags', None)), - 'cls': return_response_headers, - 'lease_access_conditions': get_access_conditions(kwargs.pop('destination_lease', None)), - 'tier': tier.value if tier else None, - 'source_modified_access_conditions': get_source_conditions(kwargs), - 'cpk_info': cpk_info, - 'cpk_scope_info': get_cpk_scope_info(kwargs) - } - options.update(kwargs) - if not overwrite and not _any_conditions(**options): # pylint: disable=protected-access - options['modified_access_conditions'].if_none_match = '*' - return options - - @distributed_trace - def upload_blob_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Dict[str, Any] - """ - Creates a new Block Blob where the content of the blob is read from a given URL. - The content of an existing blob is overwritten with the new blob. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. - :keyword bool include_source_blob_properties: - Indicates if properties from the source blob should be copied. Defaults to True. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :paramtype tags: dict(str, str) - :keyword bytearray source_content_md5: - Specify the md5 that is used to verify the integrity of the source bytes. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._upload_blob_from_url_options( - source_url=self._encode_source_url(source_url), - **kwargs) - try: - return self._client.block_blob.put_blob_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_blob( # pylint: disable=too-many-locals - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 12 - :caption: Upload a blob to the container. - """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return upload_page_blob(**options) - return upload_append_blob(**options) - - def _download_blob_options(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Service actually uses an end-range inclusive index - - validate_content = kwargs.pop('validate_content', False) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'clients': self._client, - 'config': self._config, - 'start_range': offset, - 'end_range': length, - 'version_id': kwargs.pop('version_id', None), - 'validate_content': validate_content, - 'encryption_options': { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function}, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': kwargs.pop('cls', None) or deserialize_blob_stream, - 'max_concurrency':kwargs.pop('max_concurrency', 1), - 'encoding': kwargs.pop('encoding', None), - 'timeout': kwargs.pop('timeout', None), - 'name': self.blob_name, - 'container': self.container_name} - options.update(kwargs) - return options - - @distributed_trace - def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. 
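upload_blob, shown above, chunks the payload automatically and accepts the conditional and content-setting keywords its docstring documents. A hedged sketch with placeholder names, again via the upstream azure.storage.blob package::

    from azure.storage.blob import BlobClient, BlobType, ContentSettings

    blob = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/data.csv?<sas-token>"
    )

    with open("data.csv", "rb") as stream:
        result = blob.upload_blob(
            stream,
            blob_type=BlobType.BlockBlob,   # default; PageBlob/AppendBlob also accepted
            overwrite=True,                 # otherwise ResourceExistsError if the blob exists
            max_concurrency=4,              # parallel connections for chunked uploads
            content_settings=ContentSettings(content_type="text/csv"),
            metadata={"source": "example"},
        )

    # The returned property dict includes the new etag and last-modified time.
    print(result["etag"], result["last_modified"])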
Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
- :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 12 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - return StorageStreamDownloader(**options) - - def _quick_query_options(self, query_expression, - **kwargs): - # type: (str, **Any) -> Dict[str, Any] - delimiter = '\n' - input_format = kwargs.pop('blob_format', None) - if input_format == QuickQueryDialect.DelimitedJson: - input_format = DelimitedJsonDialect() - if input_format == QuickQueryDialect.DelimitedText: - input_format = DelimitedTextDialect() - input_parquet_format = input_format == "ParquetDialect" - if input_format and not input_parquet_format: - try: - delimiter = input_format.lineterminator - except AttributeError: - try: - delimiter = input_format.delimiter - except AttributeError: - raise ValueError("The Type of blob_format can only be DelimitedTextDialect or " - "DelimitedJsonDialect or ParquetDialect") - output_format = kwargs.pop('output_format', None) - if output_format == QuickQueryDialect.DelimitedJson: - output_format = DelimitedJsonDialect() - if output_format == QuickQueryDialect.DelimitedText: - output_format = DelimitedTextDialect() - if output_format: - if output_format == "ParquetDialect": - raise ValueError("ParquetDialect is invalid as an output format.") - try: - delimiter = output_format.lineterminator - except AttributeError: - try: - delimiter = output_format.delimiter - except AttributeError: - pass - else: - output_format = input_format if not input_parquet_format else None - query_request = QueryRequest( - expression=query_expression, - input_serialization=serialize_query_format(input_format), - output_serialization=serialize_query_format(output_format) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo( - encryption_key=cpk.key_value, - encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm - ) - options = { - 'query_request': query_request, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'snapshot': self.snapshot, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized, - } - options.update(kwargs) - return options, delimiter - - @distributed_trace - def query_blob(self, query_expression, **kwargs): - # type: (str, **Any) -> BlobQueryReader - """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions. - This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - :keyword Callable[~azure.storage.blob.BlobQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword blob_format: - Optional. Defines the serialization of the data currently stored in the blob. The default is to - treat the blob data as CSV data formatted in the default dialect. 
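download_blob above returns a StorageStreamDownloader rather than raw bytes; the body is consumed through readall(), readinto(), or chunks(). A short sketch with placeholder names::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/data.csv?<sas-token>"
    )

    # Read the whole blob into memory.
    data = blob.download_blob(max_concurrency=4).readall()

    # Or stream it into an open file handle.
    with open("data_copy.csv", "wb") as handle:
        blob.download_blob().readinto(handle)

    # Or fetch only a byte range; offset is required whenever length is given.
    first_kb = blob.download_blob(offset=0, length=1024).readall()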
This can be overridden with - a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum). - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string - :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - or ~azure.storage.blob.QuickQueryDialect or str - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the blob (Parquet formats default to DelimitedTextDialect). - By providing an output format, the blob data will be reformatted according to that profile. - This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect. - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string - :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - or list[~azure.storage.blob.ArrowDialect] or ~azure.storage.blob.QuickQueryDialect or str - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (BlobQueryReader) - :rtype: ~azure.storage.blob.BlobQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on blob/or blob snapshot data by providing simple query expressions. 
- """ - errors = kwargs.pop("on_error", None) - error_cls = kwargs.pop("error_cls", BlobQueryError) - encoding = kwargs.pop("encoding", None) - options, delimiter = self._quick_query_options(query_expression, **kwargs) - try: - headers, raw_response_body = self._client.blob.query(**options) - except HttpResponseError as error: - process_storage_error(error) - return BlobQueryReader( - name=self.blob_name, - container=self.container_name, - errors=errors, - record_delimiter=delimiter, - encoding=encoding, - headers=headers, - response=raw_response_body, - error_cls=error_cls) - - @staticmethod - def _generic_delete_blob_options(delete_snapshots=None, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if delete_snapshots: - delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) - options = { - 'timeout': kwargs.pop('timeout', None), - 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs - 'delete_snapshots': delete_snapshots or None, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions} - options.update(kwargs) - return options - - def _delete_blob_options(self, delete_snapshots=None, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - if self.snapshot and delete_snapshots: - raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") - options = self._generic_delete_blob_options(delete_snapshots, **kwargs) - options['snapshot'] = self.snapshot - options['version_id'] = kwargs.pop('version_id', None) - options['blob_delete_type'] = kwargs.pop('blob_delete_type', None) - return options - - @distributed_trace - def delete_blob(self, delete_snapshots=None, **kwargs): - # type: (str, **Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 12 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - self._client.blob.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def undelete_blob(self, **kwargs): - # type: (**Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 8 - :caption: Undeleting a blob. - """ - try: - self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace() - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :kwarg str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - # Encrypted with CPK - except ResourceExistsError: - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def get_blob_properties(self, **kwargs): - # type: (**Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a blob. 
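As a usage sketch for the properties call above (assuming ``blob`` is an existing ``BlobClient``; the printed attributes are illustrative only)::

    props = blob.get_blob_properties()
    # BlobProperties carries the standard HTTP properties plus user metadata.
    print(props.name, props.blob_type, props.size, props.last_modified)
    print(props.metadata)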
- """ - # TODO: extract this out as _get_blob_properties_options - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - cls_method = kwargs.pop('cls', None) - if cls_method: - kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) - blob_props = self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - def _set_http_headers_options(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - options = { - 'timeout': kwargs.pop('timeout', None), - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return self._client.blob.set_http_headers(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _set_blob_metadata_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return self._client.blob.set_metadata(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_immutability_policy(self, immutability_policy, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Set Immutability Policy operation sets the immutability policy on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - return self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs) - - @distributed_trace - def delete_immutability_policy(self, **kwargs): - # type: (**Any) -> None - """The Delete Immutability Policy operation deletes the immutability policy on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - - self._client.blob.delete_immutability_policy(**kwargs) - - @distributed_trace - def set_legal_hold(self, legal_hold, **kwargs): - # type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]] - """The Set Legal Hold operation sets a legal hold on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :param bool legal_hold: - Specified if a legal hold should be set on the blob. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. 
- :rtype: Dict[str, Union[str, datetime, bool]] - """ - - return self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs) - - def _create_page_blob_options( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - sequence_number = kwargs.pop('sequence_number', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - immutability_policy = kwargs.pop('immutability_policy', None) - if immutability_policy: - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - - if premium_page_blob_tier: - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore - - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_content_length': size, - 'blob_sequence_number': sequence_number, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. 
- :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
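A minimal page-blob sketch for the operation above (assuming ``container_client`` is an existing ``ContainerClient``; the blob name is a placeholder). Page blob sizes and page writes must be 512-byte aligned::

    page_blob = container_client.get_blob_client("disk.vhd")
    page_blob.create_page_blob(size=1024 * 1024)                # 1 MiB, 512-byte aligned
    page_blob.upload_page(b"\x00" * 512, offset=0, length=512)  # write the first page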
- :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return self._client.page_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - immutability_policy = kwargs.pop('immutability_policy', None) - if immutability_policy: - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
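A minimal append-blob sketch for the operation above (assuming ``container_client`` is an existing ``ContainerClient``; the blob name and payloads are placeholders)::

    append_blob = container_client.get_blob_client("events.log")
    append_blob.create_append_blob()
    # Blocks are appended atomically to the end of the blob.
    append_blob.append_block(b"first entry\n")
    append_blob.append_block(b"second entry\n")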
- :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.append_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_snapshot_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
- Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 8 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return self._client.blob.create_snapshot(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - if 'source_lease' in kwargs: - source_lease = kwargs.pop('source_lease') - try: - headers['x-ms-source-lease-id'] = source_lease.id # type: str - except AttributeError: - headers['x-ms-source-lease-id'] = source_lease - - tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) - requires_sync = kwargs.pop('requires_sync', None) - source_authorization = kwargs.pop('source_authorization', None) - if source_authorization and incremental_copy: - raise ValueError("Source authorization tokens are not applicable for incremental copying.") - if requires_sync is True: - headers['x-ms-requires-sync'] = str(requires_sync) - if source_authorization: - headers['x-ms-copy-source-authorization'] = source_authorization - else: - if source_authorization: - raise ValueError("Source authorization tokens are only applicable for synchronous copy operations.") - timeout = kwargs.pop('timeout', None) - dest_mod_conditions = get_modify_conditions(kwargs) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - immutability_policy = kwargs.pop('immutability_policy', None) - if immutability_policy: - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - - options = { - 'copy_source': source_url, - 'seal_blob': kwargs.pop('seal_destination_blob', None), - 'timeout': timeout, - 'modified_access_conditions': dest_mod_conditions, - 'blob_tags_string': blob_tags_string, - 'headers': headers, - 'cls': return_response_headers, - } - if not incremental_copy: - source_mod_conditions = get_source_conditions(kwargs) - dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) - options['source_modified_access_conditions'] = source_mod_conditions - options['lease_access_conditions'] = dest_access_conditions - options['tier'] = tier.value if tier else None - options.update(kwargs) - return options - - @distributed_trace - def start_copy_from_url(self, source_url, 
metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. 
versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. 
- :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. This option is only available when `incremental_copy` is - set to False and `requires_sync` is set to True. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, Union[str, ~datetime.datetime]] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Copy a blob from a URL. - """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return self._client.page_blob.copy_incremental(**options) - return self._client.blob.start_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _abort_copy_options(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - options = { - 'copy_id': copy_id, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID string, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Abort copying a blob from URL. 
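A minimal copy sketch tying the two operations above together (assuming ``dest_blob`` is an existing ``BlobClient`` and the source URL is a placeholder that is either public or carries a SAS token)::

    source = "https://myaccount.blob.core.windows.net/mycontainer/source-blob"
    dest_blob.start_copy_from_url(source)

    # Poll the destination's copy properties; abort if the copy is still pending.
    props = dest_blob.get_blob_properties()
    if props.copy.status == "pending":
        dest_blob.abort_copy(props.copy.id)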
- """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - self._client.blob.abort_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace - def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. 
- :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - if self.snapshot and kwargs.get('version_id'): - raise ValueError("Snapshot and version_id cannot be set at the same time") - try: - self._client.blob.set_tier( - tier=standard_blob_tier, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def _stage_block_options( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_id = encode_base64(str(block_id)) - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'block_id': block_id, - 'content_length': length, - 'body': data, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. 
- For a given blob, the block_id must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return self._client.block_blob.stage_block(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _stage_block_from_url_options( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - source_authorization = kwargs.pop('source_authorization', None) - if source_length is not None and source_offset is None: - raise ValueError("Source offset value must not be None if length is set.") - if source_length is not None: - source_length = source_offset + source_length - 1 - block_id = encode_base64(str(block_id)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - range_header = None - if source_offset is not None: - range_header, _ = validate_and_format_range_headers(source_offset, source_length) - - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'copy_source_authorization': source_authorization, - 'block_id': block_id, - 'content_length': 0, - 'source_url': source_url, - 'source_range': range_header, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block_from_url( - self, block_id, # type: Union[str, int] - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. 
- :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_from_url_options( - block_id, - source_url=self._encode_source_url(source_url), - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return self._client.block_blob.stage_block_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_block_list_result(self, blocks): - # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] - committed = [] # type: List - uncommitted = [] # type: List - if blocks.committed_blocks: - committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access - if blocks.uncommitted_blocks: - uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access - return committed, uncommitted - - @distributed_trace - def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - def _commit_block_list_options( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - for block in block_list: - try: - if block.state.value == 'committed': - block_lookup.committed.append(encode_base64(str(block.id))) - elif block.state.value == 'uncommitted': - block_lookup.uncommitted.append(encode_base64(str(block.id))) - else: - block_lookup.latest.append(encode_base64(str(block.id))) - except AttributeError: - block_lookup.latest.append(encode_base64(str(block))) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - blob_headers = None - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - immutability_policy = kwargs.pop('immutability_policy', None) - if immutability_policy: - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'blocks': block_lookup, - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'tier': tier.value if tier else None, - 'blob_tags_string': blob_tags_string, - 'headers': headers - } - options.update(kwargs) - return options - - @distributed_trace - def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of Blockblobs. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
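The commit step documented here is the second half of the block-blob upload protocol: blocks are first staged, then committed in the desired order. A minimal sketch under assumed names, with an existing BlobClient called blob_client; BlobBlock is imported from the upstream azure.storage.blob package here, whereas this vendored package would expose it from the matching multi-API blob module:

    from azure.storage.blob import BlobBlock  # assumption: upstream package layout

    # Stage two blocks, then commit them; the committed order defines the blob content.
    blob_client.stage_block(block_id="block-0001", data=b"hello ")
    blob_client.stage_block(block_id="block-0002", data=b"world")
    blob_client.commit_block_list([
        BlobBlock(block_id="block-0001"),
        BlobBlock(block_id="block-0002"),
    ])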
- :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.block_blob.commit_block_list(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def _set_blob_tags_options(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - tags = serialize_blob_tags(tags) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'tags': tags, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. 
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return self._client.blob.set_tags(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_blob_tags_options(self, **kwargs): - # type: (**Any) -> Dict[str, str] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'version_id': kwargs.pop('version_id', None), - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized} - return options - - @distributed_trace - def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except HttpResponseError as error: - process_storage_error(error) - - def _get_page_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Reformat to an inclusive range index - page_range, _ = validate_and_format_range_headers( - offset, length, start_range_required=False, end_range_required=False, align_to_page=True - ) - options = { - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': page_range} - if previous_snapshot_diff: - try: - options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore - except AttributeError: - try: - options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore - except TypeError: - options['prevsnapshot'] = previous_snapshot_diff - options.update(kwargs) - return options - - @distributed_trace - def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
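Because both offset and length must be 512-byte aligned, a typical get_page_ranges call passes multiples of 512 and unpacks the two returned lists. A minimal sketch with a hypothetical page-blob client:

    # 'page_client' is assumed to be a BlobClient pointing at an existing page blob.
    filled, cleared = page_client.get_page_ranges(offset=0, length=4 * 1024 * 1024)
    for page_range in filled:
        print("filled:", page_range["start"], "-", page_range["end"])
    for page_range in cleared:
        print("cleared:", page_range["start"], "-", page_range["end"])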
- :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = self._client.page_blob.get_page_ranges(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace - def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if sequence_number_action is None: - raise ValueError("A sequence number action must be specified") - options = { - 'sequence_number_action': sequence_number_action, - 'timeout': kwargs.pop('timeout', None), - 'blob_sequence_number': sequence_number, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return self._client.page_blob.update_sequence_number(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _resize_blob_options(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if size is None: - raise ValueError("A content length must be specified for a Page Blob.") - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'blob_content_length': size, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def resize_blob(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. 
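A minimal sketch of the resize call, again with a hypothetical page-blob client; the new size must be a multiple of 512 bytes, and shrinking the blob clears any pages beyond the new size:

    # Grow a page blob to 1 MiB (512-byte aligned), then shrink it back to 512 KiB.
    page_client.resize_blob(1024 * 1024)
    page_client.resize_blob(512 * 1024)  # pages beyond 512 KiB are cleared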
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return self._client.page_blob.resize(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_page_options( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - if isinstance(page, six.text_type): - page = page.encode(kwargs.pop('encoding', 'UTF-8')) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': page[:length], - 'content_length': length, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. 
The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return self._client.page_blob.upload_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_pages_from_url_options( # type: ignore - self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # TODO: extract the code to a method format_range - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - if source_offset is None or offset % 512 != 0: - raise ValueError("source_offset must be an integer that aligns with 512 page size") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) # should subtract 1 here? - - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - source_authorization = kwargs.pop('source_authorization', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - source_content_md5 = kwargs.pop('source_content_md5', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'copy_source_authorization': source_authorization, - 'source_url': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. 
- The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _clear_page_options(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def clear_page(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. 
Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return self._client.page_blob.clear_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _append_block_options( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if length == 0: - return {} - if isinstance(data, bytes): - data = data[:length] - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - validate_content = kwargs.pop('validate_content', False) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': data, - 'content_length': length, - 'timeout': kwargs.pop('timeout', None), - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
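As a usage illustration for the append path and the conditional headers described here, a minimal sketch with a hypothetical append-blob client; maxsize_condition caps the blob at 1 MiB, and the request fails with HTTP 412 once that limit would be exceeded:

    # 'append_client' is assumed to be a BlobClient for an existing append blob.
    append_client.append_block(b"log line 1\n")
    append_client.append_block(
        b"log line 2\n",
        maxsize_condition=1024 * 1024,  # fail with 412 if the blob would exceed 1 MiB
    )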
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return self._client.append_blob.append_block(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _append_block_from_url_options( # type: ignore - self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # If end range is provided, start range must be provided - if source_length is not None and source_offset is None: - raise ValueError("source_offset should also be specified if source_length is specified") - # Format based on whether length is present - source_range = None - if source_length is not None: - end_range = source_offset + source_length - 1 - source_range = 'bytes={0}-{1}'.format(source_offset, end_range) - elif source_offset is not None: - source_range = "bytes={0}-".format(source_offset) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - source_content_md5 = kwargs.pop('source_content_md5', None) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - source_authorization = kwargs.pop('source_authorization', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'copy_source_authorization': source_authorization, - 'source_url': copy_source_url, - 'content_length': 0, - 'source_range': source_range, - 'source_content_md5': source_content_md5, - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. 
If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return self._client.append_blob.append_block_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _seal_append_blob_options(self, **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - append_conditions = None - if appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return self._client.append_blob.seal(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_container_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> ContainerClient - """Get a client to interact with the blob's parent container. - - The container need not already exist. Defaults to current blob's credentials. - - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_client_from_blob_client] - :end-before: [END get_container_client_from_blob_client] - :language: python - :dedent: 8 - :caption: Get container client from blob object. - """ - from ._container_client import ContainerClient - if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2020_10_02/_blob_service_client.py deleted file mode 100644 index 33b6120..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_blob_service_client.py +++ /dev/null @@ -1,731 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
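# --- Illustrative usage sketch (editorial addition; not part of the deleted source file). ---
# Shows append_block_from_url and seal_append_blob as documented above, assuming the public
# azure.storage.blob package; URLs, names and SAS tokens are placeholders.
from azure.storage.blob import BlobClient

dest = BlobClient(
    account_url="https://<account>.blob.core.windows.net",
    container_name="logs",
    blob_name="merged.log",
    credential="<sas-token>")
dest.create_append_blob()
dest.append_block_from_url(
    "https://<account>.blob.core.windows.net/source/part1.log?<source-sas>",
    source_offset=0,        # first byte (inclusive) to take from the copy source
    source_length=512)      # number of bytes to take
dest.seal_append_blob()     # the append blob becomes read-only (service version 2019-12-12 and later)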
-# -------------------------------------------------------------------------- - -import functools -import warnings -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._shared.models import LocationMode -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.parser import _to_utc_datetime -from ._shared.response_handlers import return_response_headers, process_storage_error, \ - parse_to_internal_user_delegation_key -from ._generated import AzureBlobStorage -from ._generated.models import StorageServiceProperties, KeyInfo -from ._container_client import ContainerClient -from ._blob_client import BlobClient -from ._models import ContainerPropertiesPaged -from ._list_blobs_helper import FilteredBlobPaged -from ._serialize import get_api_version -from ._deserialize import service_stats_deserialize, service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from ._shared.models import UserDelegationKey - from ._lease import BlobLeaseClient - from ._models import ( - ContainerProperties, - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - FilteredBlob - ) - - -class BlobServiceClient(StorageAccountHostsMixin): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. 
Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self._query_str, credential = self._format_query_string(sas_token, credential) - super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> BlobServiceClient - """Create BlobServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob service client. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string] - :end-before: [END auth_from_connection_string] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient from a connection string. 
- """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 8 - :caption: Getting account information for the blob service. - """ - try: - return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_service_stats(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. 
- :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 8 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 8 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. 
- :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 8 - :caption: Setting service properties for the blob service. - """ - if all(parameter is None for parameter in [ - analytics_logging, hour_metrics, minute_metrics, cors, - target_version, delete_retention_policy, static_website]): - raise ValueError("set_service_properties should be called with at least one parameter") - - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 12 - :caption: Listing the containers in the blob service. - """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> ItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. 
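# --- Illustrative usage sketch (editorial addition; not part of the deleted source file). ---
# Shows set_service_properties and list_containers as documented above, assuming the public
# azure.storage.blob package; the connection string, origin and prefix are placeholders.
from azure.storage.blob import BlobServiceClient, CorsRule, RetentionPolicy

service = BlobServiceClient.from_connection_string("<connection-string>")
service.set_service_properties(
    delete_retention_policy=RetentionPolicy(enabled=True, days=7),   # keep deleted blobs for 7 days
    cors=[CorsRule(allowed_origins=["https://example.com"], allowed_methods=["GET"])])
for container in service.list_containers(name_starts_with="logs", include_metadata=True):
    print(container.name, container.metadata)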
Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace - def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 12 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace - def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. 
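# --- Illustrative usage sketch (editorial addition; not part of the deleted source file). ---
# Shows a create/delete container round trip and find_blobs_by_tags as documented above,
# assuming the public azure.storage.blob package; names, the tag expression and the
# connection string are placeholders.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")
service.create_container("test-container", metadata={"Category": "test"})
for blob in service.find_blobs_by_tags("\"project\"='alpha'"):   # blob tag filter expression
    print(blob.name, blob.container_name)
service.delete_container("test-container")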
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 12 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace - def _rename_container(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str name: - The name of the container to rename. - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - renamed_container = self.get_container_client(new_name) - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - if new_name: - warnings.warn("`new_name` is no longer supported.", DeprecationWarning) - container = self.get_container_client(new_name or deleted_container_name) - try: - container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except HttpResponseError as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 8 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 12 - :caption: Getting the blob client to interact with a specific blob. 
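# --- Illustrative usage sketch (editorial addition; not part of the deleted source file). ---
# Shows navigating from the service client to container and blob clients as documented
# above, assuming the public azure.storage.blob package; the connection string and names
# are placeholders.
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string("<connection-string>")
container_client = service.get_container_client("my-container")
blob_client = service.get_blob_client("my-container", "hello.txt")
blob_client.upload_blob(b"Hello, world!", overwrite=True)
print(blob_client.download_blob().readall())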
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_container_client.py b/azure/multiapi/storagev2/blob/v2020_10_02/_container_client.py deleted file mode 100644 index 59f17d6..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_container_client.py +++ /dev/null @@ -1,1551 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import HttpRequest - -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from ._generated import AzureBlobStorage -from ._generated.models import SignedIdentifier -from ._deserialize import deserialize_container_properties -from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobType) -from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged -from ._lease import BlobLeaseClient -from ._blob_client import BlobClient - -if TYPE_CHECKING: - from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports - from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports - from datetime import datetime - from ._models import ( # pylint: disable=unused-import - PublicAccess, - AccessPolicy, - ContentSettings, - StandardBlobTier, - PremiumPageBlobTier) - - -def _get_blob_name(blob): - """Return the blob name. 
- - :param blob: A blob string or BlobProperties - :rtype: str - """ - try: - return blob.get('name') - except AttributeError: - return blob - - -class ContainerClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 8 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not container_name: - raise ValueError("Please specify a container name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self.container_name = container_name - # This parameter is used for the hierarchy traversal. Give precedence to credential. - self._raw_credential = credential if credential else sas_token - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - self._query_str) - - @classmethod - def from_container_url(cls, container_url, credential=None, **kwargs): - # type: (str, Optional[Any], Any) -> ContainerClient - """Create ContainerClient from a container url. - - :param str container_url: - The full endpoint URL to the Container, including SAS token if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type container_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - """ - try: - if not container_url.lower().startswith('http'): - container_url = "https://" + container_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(container_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(container_url)) - - container_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(container_path) > 1: - account_path = "/" + "/".join(container_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name = unquote(container_path[-1]) - if not container_name: - raise ValueError("Invalid URL. Please provide a URL with a valid container name") - return cls(account_url, container_name=container_name, credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ContainerClient - """Create ContainerClient from a Connection String. 
- - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: - The container name for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_container] - :end-before: [END auth_from_connection_string_container] - :language: python - :dedent: 8 - :caption: Creating the ContainerClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, credential=credential, **kwargs) - - @distributed_trace - def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 12 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - timeout = kwargs.pop('timeout', None) - headers.update(add_metadata_headers(metadata)) # type: ignore - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _rename_container(self, new_name, **kwargs): - # type: (str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
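# --- Illustrative usage sketch (editorial addition; not part of the deleted source file). ---
# Shows the ContainerClient constructors documented above, assuming the public
# azure.storage.blob package; URLs, SAS tokens and the connection string are placeholders.
from azure.storage.blob import ContainerClient

# From a full container URL (optionally carrying a SAS token).
by_url = ContainerClient.from_container_url(
    "https://<account>.blob.core.windows.net/my-container?<sas-token>")
# From a connection string plus the container name.
by_conn = ContainerClient.from_connection_string("<connection-string>", "my-container")
by_conn.create_container(metadata={"Category": "test"}, public_access="blob")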
- :rtype: ~azure.storage.blob.ContainerClient - """ - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container = ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 12 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. 
- - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_container_properties(self, **kwargs): - # type: (Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a container exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - self._client.container.get_properties(**kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the container. 
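        A minimal inline sketch (the metadata values below are placeholders; note that
        each call replaces any metadata already set on the container)::

            container_client.set_container_metadata(metadata={"category": "test", "owner": "docs"})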
- """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> BlobServiceClient - """Get a client to interact with the container's parent service account. - - Defaults to current container's credentials. - - :returns: A BlobServiceClient. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_client_from_container_client] - :end-before: [END get_blob_service_client_from_container_client] - :language: python - :dedent: 8 - :caption: Get blob service client from container object. - """ - from ._blob_service_client import BlobServiceClient - if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return BlobServiceClient( - "{}://{}".format(self.scheme, self.primary_hostname), - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, - _pipeline=_pipeline) - - @distributed_trace - def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 12 - :caption: Getting the access policy on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 12 - :caption: Setting access policy on the container. - """ - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - lease = kwargs.pop('lease', None) - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', - 'tags', 'versions', 'immutabilitypolicy', 'legalhold'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 8 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace - def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 8 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace - def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - blob_client.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace - def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return blob_client.download_blob(offset=offset, length=length, **kwargs) - - def _generate_delete_blobs_subrequest_options( - self, snapshot=None, - delete_snapshots=None, - lease_access_conditions=None, - modified_access_conditions=None, - **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. - """ - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct parameters - timeout = kwargs.pop('timeout', None) - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access - "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access - "lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = 
self._client._serialize.header( # pylint: disable=protected-access - "if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_delete_blobs_options(self, - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - delete_snapshots = kwargs.pop('delete_snapshots', None) - if_modified_since = kwargs.pop('if_modified_since', None) - if_unmodified_since = kwargs.pop('if_unmodified_since', None) - if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "", - 'path': self.container_name, - 'restype': 'restype=container&' - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - snapshot=blob.get('snapshot'), - delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), - lease=blob.get('lease_id'), - if_modified_since=if_modified_since or blob.get('if_modified_since'), - if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), - etag=blob.get('etag'), - if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), - match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') - else None, - timeout=blob.get('timeout'), - ) - except AttributeError: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - delete_snapshots=delete_snapshots, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_tags_match_condition=if_tags_match_condition - ) - - query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) - - req = HttpRequest( - "DELETE", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def delete_blobs(self, *blobs, **kwargs): - # type: (...) -> Iterator[HttpResponse] - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 8 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def _generate_set_tiers_subrequest_options( - self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. 
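        For orientation, the public batch helpers built on these sub-requests are used
        along these lines (illustrative; the blob names and tier value are placeholders)::

            container_client.delete_blobs("logs/a.log", "logs/b.log", raise_on_any_failure=False)
            container_client.set_standard_blob_tier_blobs("Archive", "logs/a.log", "logs/b.log")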
- """ - if not tier: - raise ValueError("A blob tier must be specified") - if snapshot and version_id: - raise ValueError("Snapshot and version_id cannot be set at the same time") - if_tags = kwargs.pop('if_tags', None) - - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - timeout = kwargs.pop('timeout', None) - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if version_id is not None: - query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access - "rehydrate_priority", rehydrate_priority, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_set_tiers_options(self, - blob_tier, # type: Optional[Union[str, StandardBlobTier, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - rehydrate_priority = kwargs.pop('rehydrate_priority', None) - if_tags = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "", - 'path': self.container_name, - 'restype': 'restype=container&' - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - tier = blob_tier or blob.get('blob_tier') - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - tier=tier, - snapshot=blob.get('snapshot'), - version_id=blob.get('version_id'), - rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), - lease_access_conditions=blob.get('lease_id'), - if_tags=if_tags or blob.get('if_tags_match_condition'), - timeout=timeout or blob.get('timeout') - ) - except AttributeError: - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) - - req = HttpRequest( - "PUT", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def set_standard_blob_tier_blobs( - self, - 
standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - snapshot: - key: "snapshost", value type: str - version id: - key: "version_id", value type: str - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - @distributed_trace - def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. 
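        A minimal inline sketch (the tier value and blob names are placeholders; pass ``None``
        as the tier to honour a per-blob ``blob_tier`` key instead, as described in the note above)::

            container_client.set_premium_page_blob_tier_blobs("P10", "disk1.vhd", "disk2.vhd")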
- - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[str, BlobProperties] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 8 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_deserialize.py b/azure/multiapi/storagev2/blob/v2020_10_02/_deserialize.py deleted file mode 100644 index c724753..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_deserialize.py +++ /dev/null @@ -1,169 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
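# Illustrative end-to-end sketch of the ContainerClient helpers documented above
# (the connection string, container name and blob name are placeholders):
#
#     from azure.storage.blob import ContainerClient
#
#     client = ContainerClient.from_connection_string("<connection-string>", container_name="mycontainer")
#     client.upload_blob("hello.txt", b"hello world", overwrite=True)
#     for blob in client.list_blobs(name_starts_with="hello"):
#         print(blob.name, blob.size)
#     data = client.download_blob("hello.txt").readall()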
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties, ImmutabilityPolicy -from ._shared.models import get_enum_value - -from ._shared.response_handlers import deserialize_metadata -from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ - StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule - -if TYPE_CHECKING: - from ._generated.models import PageList - - -def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers): - try: - deserialized_response = response.http_response - except AttributeError: - deserialized_response = response - return cls_method(deserialized_response, obj, headers) - - -def deserialize_blob_properties(response, obj, headers): - blob_properties = BlobProperties( - metadata=deserialize_metadata(response, obj, headers), - object_replication_source_properties=deserialize_ors_policies(response.http_response.headers), - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - blob_properties.content_settings.content_md5 = None - return blob_properties - - -def deserialize_ors_policies(policy_dictionary): - - if policy_dictionary is None: - return None - # For source blobs (blobs that have policy ids and rule ids applied to them), - # the header will be formatted as "x-ms-or-_: {Complete, Failed}". - # The value of this header is the status of the replication. - or_policy_status_headers = {key: val for key, val in policy_dictionary.items() - if 'or-' in key and key != 'x-ms-or-policy-id'} - - parsed_result = {} - - for key, val in or_policy_status_headers.items(): - # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule - policy_and_rule_ids = key.split('or-')[1].split('_') - policy_id = policy_and_rule_ids[0] - rule_id = policy_and_rule_ids[1] - - # If we are seeing this policy for the first time, create a new list to store rule_id -> result - parsed_result[policy_id] = parsed_result.get(policy_id) or list() - parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val)) - - result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()] - - return result_list - - -def deserialize_blob_stream(response, obj, headers): - blob_properties = deserialize_blob_properties(response, obj, headers) - obj.properties = blob_properties - return response.http_response.location_mode, obj - - -def deserialize_container_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - container_properties = ContainerProperties( - metadata=metadata, - **headers - ) - return container_properties - - -def get_page_ranges_result(ranges): - # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - page_range = [] # type: ignore - clear_range = [] # type: List - if ranges.page_range: - page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore - if ranges.clear_range: - clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range] - return page_range, clear_range # type: ignore - - -def service_stats_deserialize(generated): - """Deserialize a ServiceStats objects into a dict. 
- """ - return { - 'geo_replication': { - 'status': generated.geo_replication.status, - 'last_sync_time': generated.geo_replication.last_sync_time, - } - } - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. - """ - return { - 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'target_version': generated.default_service_version, # pylint: disable=protected-access - 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access - 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access - } - - -def get_blob_properties_from_generated_code(generated): - blob = BlobProperties() - blob.name = generated.name - blob_type = get_enum_value(generated.properties.blob_type) - blob.blob_type = BlobType(blob_type) if blob_type else None - blob.etag = generated.properties.etag - blob.deleted = generated.deleted - blob.snapshot = generated.snapshot - blob.is_append_blob_sealed = generated.properties.is_sealed - blob.metadata = generated.metadata.additional_properties if generated.metadata else {} - blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None - blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access - blob.last_modified = generated.properties.last_modified - blob.creation_time = generated.properties.creation_time - blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access - blob.size = generated.properties.content_length - blob.page_blob_sequence_number = generated.properties.blob_sequence_number - blob.server_encrypted = generated.properties.server_encrypted - blob.encryption_scope = generated.properties.encryption_scope - blob.deleted_time = generated.properties.deleted_time - blob.remaining_retention_days = generated.properties.remaining_retention_days - blob.blob_tier = generated.properties.access_tier - blob.rehydrate_priority = generated.properties.rehydrate_priority - blob.blob_tier_inferred = generated.properties.access_tier_inferred - blob.archive_status = generated.properties.archive_status - blob.blob_tier_change_time = generated.properties.access_tier_change_time - blob.version_id = generated.version_id - blob.is_current_version = generated.is_current_version - blob.tag_count = generated.properties.tag_count - blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access - blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) - blob.last_accessed_on = generated.properties.last_accessed_on - blob.immutability_policy = ImmutabilityPolicy._from_generated(generated) # pylint: disable=protected-access - blob.has_legal_hold = generated.properties.legal_hold - blob.has_versions_only = generated.has_versions_only - return blob - - -def parse_tags(generated_tags): - # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] - """Deserialize a list of BlobTag objects into a 
dict. - """ - if generated_tags: - tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} - return tag_dict - return None diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_download.py b/azure/multiapi/storagev2/blob/v2020_10_02/_download.py deleted file mode 100644 index 05bdbd0..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_download.py +++ /dev/null @@ -1,636 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -import threading -import time - -import warnings -from io import BytesIO -from typing import Iterator - -import requests -from azure.core.exceptions import HttpResponseError, ServiceResponseError - -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range -from ._deserialize import get_page_ranges_result - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - - content = b"".join(list(data)) - - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - non_empty_ranges=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - self.non_empty_ranges = non_empty_ranges - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - 
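# Illustrative sketch, standalone rather than part of the vendored module: the
# 16-byte alignment that process_range_and_offset applies when client-side
# encryption is in play. The requested byte range is widened to AES block
# boundaries, one extra leading block is fetched so it can serve as the IV, and
# the returned offsets record how many surplus bytes must be trimmed from the
# decrypted result at each end.
def align_range_for_decryption(start_range, end_range, length):
    start_offset, end_offset = 0, 0
    if start_range is not None:
        start_offset = start_range % 16       # distance back to the block boundary
        start_range -= start_offset
        if start_range > 0:                   # pull in one more block for the IV
            start_offset += 16
            start_range -= 16
    if length is not None:
        end_offset = 15 - (end_range % 16)    # pad forward to the end of the block
        end_range += end_offset
    return (start_range, end_range), (start_offset, end_offset)

# Example: bytes 20-50 of an encrypted blob become a request for bytes 0-63,
# with offsets (20, 13) to trim after decryption.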
self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _do_optimize(self, given_range_start, given_range_end): - # If we have no page range list stored, then assume there's data everywhere for that page blob - # or it's a block blob or append blob - if self.non_empty_ranges is None: - return False - - for source_range in self.non_empty_ranges: - # Case 1: As the range list is sorted, if we've reached such a source_range - # we've checked all the appropriate source_range already and haven't found any overlapping. - # so the given range doesn't have any data and download optimization could be applied. - # given range: | | - # source range: | | - if given_range_end < source_range['start']: # pylint:disable=no-else-return - return True - # Case 2: the given range comes after source_range, continue checking. - # given range: | | - # source range: | | - elif source_range['end'] < given_range_start: - pass - # Case 3: source_range and given range overlap somehow, no need to optimize. - else: - return False - # Went through all src_ranges, but nothing overlapped. Optimization will be applied. - return True - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
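# Illustrative sketch, standalone: the overlap test _do_optimize performs for
# sparse page blobs. non_empty_ranges is the sorted page-range list obtained
# from Get Page Ranges; when a chunk falls entirely outside every non-empty
# range, the downloader can write zeros locally instead of issuing a GET.
def chunk_is_all_zeros(non_empty_ranges, chunk_start, chunk_end):
    if non_empty_ranges is None:              # block/append blob, or ranges unknown
        return False
    for source_range in non_empty_ranges:
        if chunk_end < source_range['start']:
            return True                        # list is sorted; nothing later can overlap
        if source_range['end'] < chunk_start:
            continue                           # this range ends before the chunk starts
        return False                           # overlap found; the chunk must be fetched
    return True                                # chunk lies beyond every non-empty range

# Example: chunk_is_all_zeros([{'start': 0, 'end': 511}], 1024, 2047) is True,
# so that 1 KiB chunk is filled with b"\x00" locally.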
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - - retry_active = True - retry_total = 3 - while retry_active: - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - try: - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - retry_active = False - except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - time.sleep(1) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get("modified_access_conditions"): - self.request_options["modified_access_conditions"].if_match = response.properties.etag - - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += self._iter_downloader.yield_chunk(chunk) - except StopIteration as e: - self._complete = True - if self._current_content: - return self._current_content - raise e - - # the current content from the first get is still there but smaller than chunk size - # therefore we want to make sure its also included - return self._get_chunk_data() - - next = __next__ # Python 2 compatibility. - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the blob. 
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - retry_active = True - retry_total = 3 - while retry_active: - try: - location_mode, response = self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - try: - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - retry_active = False - except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - time.sleep(1) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - # according to the REST API documentation: - # in a highly fragmented page blob with a large number of writes, - # a Get Page Ranges request can fail due to an internal server timeout. - # thus, if the page blob is not sparse, it's ok for it to fail - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - if self._request_options.get("modified_access_conditions"): - self._request_options["modified_access_conditions"].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob_in_chunk] - :end-before: [END download_a_blob_in_chunk] - :language: python - :dedent: 12 - :caption: Download a blob using chunks(). 
- """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." 
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor: - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/__init__.py deleted file mode 100644 index cc760e7..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_azure_blob_storage.py deleted file mode 100644 index d93c6ec..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_azure_blob_storage.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. 
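# Illustrative sketch, standalone: the fan-out readinto() uses above when
# max_concurrency > 1. Chunk offsets are generated lazily and mapped over a
# thread pool; with_current_context carries the ambient tracing context into
# the worker threads, and list() forces completion so any worker exception is
# re-raised on the calling thread. download_one stands in for process_chunk.
import concurrent.futures

from azure.core.tracing.common import with_current_context


def download_chunks(chunk_offsets, download_one, max_concurrency=1):
    if max_concurrency <= 1:
        for offset in chunk_offsets:
            download_one(offset)
        return
    with concurrent.futures.ThreadPoolExecutor(max_concurrency) as executor:
        list(executor.map(with_current_context(download_one), chunk_offsets))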
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from ._configuration import AzureBlobStorageConfiguration -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from . import models - - -class AzureBlobStorage(object): - """AzureBlobStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.blob.operations.ServiceOperations - :ivar container: ContainerOperations operations - :vartype container: azure.storage.blob.operations.ContainerOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.blob.operations.DirectoryOperations - :ivar blob: BlobOperations operations - :vartype blob: azure.storage.blob.operations.BlobOperations - :ivar page_blob: PageBlobOperations operations - :vartype page_blob: azure.storage.blob.operations.PageBlobOperations - :ivar append_blob: AppendBlobOperations operations - :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations - :ivar block_blob: BlockBlobOperations operations - :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, http_request, **kwargs): - # type: (HttpRequest, Any) -> HttpResponse - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. 
Does not do error handling on your response. - :rtype: ~azure.core.pipeline.transport.HttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureBlobStorage - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_configuration.py deleted file mode 100644 index 1c0d9c8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_configuration.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-10-02" - kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/__init__.py deleted file mode 100644 index 12cfcf6..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_azure_blob_storage.py deleted file mode 100644 index b945951..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_azure_blob_storage.py +++ /dev/null @@ -1,101 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest -from msrest import Deserializer, Serializer - -from ._configuration import AzureBlobStorageConfiguration -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import DirectoryOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from .. import models - - -class AzureBlobStorage(object): - """AzureBlobStorage. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.blob.aio.operations.ServiceOperations - :ivar container: ContainerOperations operations - :vartype container: azure.storage.blob.aio.operations.ContainerOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.blob.aio.operations.DirectoryOperations - :ivar blob: BlobOperations operations - :vartype blob: azure.storage.blob.aio.operations.BlobOperations - :ivar page_blob: PageBlobOperations operations - :vartype page_blob: azure.storage.blob.aio.operations.PageBlobOperations - :ivar append_blob: AppendBlobOperations operations - :vartype append_blob: azure.storage.blob.aio.operations.AppendBlobOperations - :ivar block_blob: BlockBlobOperations operations - :vartype block_blob: azure.storage.blob.aio.operations.BlockBlobOperations - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureBlobStorage": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_configuration.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_configuration.py deleted file mode 100644 index bb5c749..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_configuration.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the desired operation. 
- :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-10-02" - kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/__init__.py deleted file mode 100644 index 62f85c9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_append_blob_operations.py deleted file mode 100644 index 4d18668..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_append_blob_operations.py +++ /dev/null @@ -1,726 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
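# Illustrative sketch, not generated code: both configuration classes above
# resolve every pipeline policy from kwargs before falling back to the
# azure-core default (RetryPolicy vs. AsyncRetryPolicy, RedirectPolicy vs.
# AsyncRedirectPolicy, and so on). A caller can therefore replace a single
# policy when building the low-level client. The import path assumes one of
# the retained API versions ships the same generated package; the URL and
# retry count are placeholders.
from azure.core.pipeline import policies
from azure.multiapi.storagev2.blob.v2021_08_06._generated import AzureBlobStorage

client = AzureBlobStorage(
    "https://myaccount.blob.core.windows.net/mycontainer",
    retry_policy=policies.RetryPolicy(retry_total=5),
)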
-# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class AppendBlobOperations: - """AppendBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - content_length: int, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. 
- :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "AppendBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = 
self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - 
header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def append_block( - self, - content_length: int, - body: IO, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Append Block operation commits a new block of data to the end of an existing append blob. - The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. 
- :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _max_size = None - _append_position = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "appendblock" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.append_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - 
header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def append_block_from_url( - self, - source_url: str, - content_length: int, - source_range: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - copy_source_authorization: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Append Block operation commits a new block of data to the end of an existing append blob - where the contents are read from a source url. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. 
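# A hedged sketch of the Append Block operation at the public-client level; the
# payloads and names are assumptions for illustration only.
import asyncio
from azure.storage.blob.aio import BlobClient

async def append_block_example(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(conn_str, container_name="logs", blob_name="app.log")
    async with blob:
        # Each call commits one block to the end of an existing append blob
        # (the blob must have been created with x-ms-blob-type: AppendBlob).
        await blob.append_block(b"2024-01-01T00:00:00Z event=start\n")
        await blob.append_block(b"2024-01-01T00:00:05Z event=stop\n")

# asyncio.run(append_block_example("<connection-string>"))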
- :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _max_size = None - _append_position = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "appendblock" - accept = "application/xml" - - # Construct URL - url = self.append_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # 
Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def seal( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on - version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _append_position = None - if append_position_access_conditions is not None: - _append_position = append_position_access_conditions.append_position - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - comp = "seal" - accept = "application/xml" - - # Construct URL - url = self.seal.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_blob_operations.py deleted file mode 100644 index f2d0642..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_blob_operations.py +++ /dev/null @@ -1,3425 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class BlobOperations: - """BlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
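# A hedged sketch of the Seal operation (service version 2019-12-12 or later) with
# the public async client; names are illustrative.
import asyncio
from azure.storage.blob.aio import BlobClient

async def seal_example(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(conn_str, container_name="logs", blob_name="app.log")
    async with blob:
        # After sealing, the append blob is read-only and further append_block
        # calls are rejected by the service.
        await blob.seal_append_blob()

# asyncio.run(seal_example("<connection-string>"))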
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def download( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - range_get_content_md5: Optional[bool] = None, - range_get_content_crc64: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> IO: - """The Download operation reads or downloads a blob from the system, including its metadata and - properties. You can also call Download to read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param range_get_content_md5: When set to true and specified together with the Range, the - service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified together with the Range, the - service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 - MB in size. - :type range_get_content_crc64: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', 
response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', 
response.headers.get('x-ms-legal-hold')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_properties( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
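# A hedged sketch of the Download operation through the public async client; the
# 512-byte range and names are illustrative.
import asyncio
from azure.storage.blob.aio import BlobClient

async def download_example(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(conn_str, container_name="logs", blob_name="app.log")
    async with blob:
        # Full download, streamed and then read into memory.
        data = await (await blob.download_blob()).readall()
        # Ranged download, equivalent to sending the x-ms-range header built above.
        head = await (await blob.download_blob(offset=0, length=512)).readall()
        print(len(data), len(head))

# asyncio.run(download_example("<connection-string>"))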
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if 
request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) - response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) - response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def delete( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, - request_id_parameter: Optional[str] = None, - blob_delete_type: Optional[str] = "Permanent", - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - permanently removed from the storage account. 
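# A hedged sketch of Get Properties, including the conditional-request values the
# ModifiedAccessConditions parameter group above carries; names are placeholders.
import asyncio
from azure.core import MatchConditions
from azure.storage.blob.aio import BlobClient

async def properties_example(conn_str: str) -> None:
    blob = BlobClient.from_connection_string(conn_str, container_name="logs", blob_name="app.log")
    async with blob:
        props = await blob.get_blob_properties()
        # Re-issue the call only if the blob is unchanged (If-Match on the ETag).
        props = await blob.get_blob_properties(
            etag=props.etag, match_condition=MatchConditions.IfNotModified
        )
        print(props.last_modified, props.creation_time, props.lease.state)

# asyncio.run(properties_example("<connection-string>"))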
If the storage account's soft delete feature is - enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for the number of days - specified by the DeleteRetentionPolicy section of [Storage service properties] - (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's - data is permanently removed from the storage account. Note that you continue to be charged for - the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and - specify the "include=deleted" query parameter to discover which blobs and snapshots have been - soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other - operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code - of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the - following two options: include: Delete the base blob and all of its snapshots. only: Delete - only the blob's snapshots and not the blob itself. - :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to - permanently delete a blob if blob soft delete is enabled. - :type blob_delete_type: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
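# A hedged sketch of the Delete workflow described above: delete a blob together
# with its snapshots, then find soft-deleted blobs with include=deleted and restore
# one via Undelete. All names and the connection string are illustrative.
import asyncio
from azure.storage.blob.aio import ContainerClient

async def delete_and_undelete_example(conn_str: str) -> None:
    container = ContainerClient.from_connection_string(conn_str, container_name="logs")
    async with container:
        # "include" removes the base blob and all snapshots; "only" would remove
        # just the snapshots and keep the blob.
        await container.get_blob_client("app.log").delete_blob(delete_snapshots="include")
        # While the DeleteRetentionPolicy window is open, soft-deleted blobs are
        # still discoverable and can be restored.
        async for item in container.list_blobs(include=["deleted"]):
            if item.deleted:
                await container.get_blob_client(item.name).undelete_blob()

# asyncio.run(delete_and_undelete_example("<connection-string>"))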
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if blob_delete_type is not None: - query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in 
[202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_access_control( - self, - timeout: Optional[int] = None, - upn: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def rename( - self, - rename_source: str, - timeout: Optional[int] = None, - path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - source_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Rename a blob/file. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. 
If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - 
if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, 
header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def undelete( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_expiry( - self, - expiry_options: Union[str, "_models.BlobExpiryOptions"], - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - expires_on: Optional[str] = None, - **kwargs: Any - ) -> None: - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/xml" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_http_headers( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_md5 = None - _blob_content_encoding = None - _blob_content_language = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _blob_content_disposition = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_disposition = blob_http_headers.blob_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language 
is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_immutability_policy( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Set Immutability Policy operation sets the immutability policy on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "immutabilityPolicies" - accept = "application/xml" - - # Construct URL - url = self.set_immutability_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def delete_immutability_policy( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Delete Immutability Policy operation deletes the immutability policy on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "immutabilityPolicies" - accept = "application/xml" - - # Construct URL - url = self.delete_immutability_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_legal_hold( - self, - legal_hold: bool, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Set Legal Hold operation sets a legal hold on the blob. - - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "legalhold" - accept = "application/xml" - - # Construct URL - url = self.set_legal_hold.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_legal_hold.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or - more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 
'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) 
- raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
_if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - proposed_lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def create_snapshot( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
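For orientation, the lease coroutines deleted above (acquire_lease, release_lease, renew_lease, change_lease, break_lease) are low-level generated operations; callers normally reach them through the public async lease client rather than invoking the operations class directly. A minimal sketch, assuming azure-storage-blob's aio layer (or the matching versioned module vendored by this package) and placeholder connection details:

    import asyncio
    from azure.storage.blob.aio import BlobClient, BlobLeaseClient

    async def lease_demo():
        # "<connection-string>", container and blob names are placeholders.
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="demo", blob_name="report.txt"
        )
        async with blob:
            # Acquire a finite lease (15-60 seconds, or -1 for infinite).
            lease: BlobLeaseClient = await blob.acquire_lease(lease_duration=15)
            try:
                await lease.renew()              # extend the current lease
                await lease.change("<new-guid>") # proposed lease ID must be a valid GUID
            finally:
                await lease.release()            # or: await lease.break_lease()

    asyncio.run(lease_demo())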
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if 
_lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def start_copy_from_url( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - seal_blob: Optional[bool] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Start Copy From URL operation copies a blob or an internet resource to a new blob. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
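The set_metadata and create_snapshot operations removed above roughly correspond to BlobClient.set_blob_metadata and BlobClient.create_snapshot on the public async surface. A rough usage sketch, with credentials, names, and metadata values as placeholders:

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def metadata_and_snapshot_demo():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="demo", blob_name="report.txt"
        )
        async with blob:
            # Replaces all user-defined metadata on the base blob.
            await blob.set_blob_metadata({"project": "multiapi", "stage": "draft"})
            # Creates a read-only snapshot; the returned dict includes the
            # snapshot timestamp that identifies it.
            snapshot = await blob.create_snapshot(metadata={"label": "pre-release"})
            print(snapshot["snapshot"])

    asyncio.run(metadata_and_snapshot_demo())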
- :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. Service version - 2019-12-12 and newer. - :type seal_blob: bool - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - accept = "application/xml" - - # Construct URL - url = self.start_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def copy_from_url( - self, - 
copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - copy_source_authorization: Optional[str] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not - return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - x_ms_requires_sync = "true" - accept = "application/xml" - - # Construct URL - url = self.copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _if_modified_since is not None: - 
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - 
response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def abort_copy_from_url( - self, - copy_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a - destination blob with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_tier( - self, - tier: Union[str, "_models.AccessTierRequired"], - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a - premium storage account and on a block blob in a blob storage account (locally redundant - storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not - update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tier" - accept = "application/xml" - - # Construct URL - url = self.set_tier.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if response.status_code == 202: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_account_info( - self, - **kwargs: Any - ) -> None: - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def query( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - query_request: Optional["_models.QueryRequest"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> IO: - """The Query operation enables users to select/project on blob data by providing simple query - expressions. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param query_request: the query request. - :type query_request: ~azure.storage.blob.models.QueryRequest - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "query" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.query.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - 
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - 
response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - 
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_tags( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> "_models.BlobTags": - """The Get Tags operation enables users to get the tags associated with a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. 
- :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlobTags, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - accept = "application/xml" - - # Construct URL - url = self.get_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) - deserialized = self._deserialize('BlobTags', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_tags( - self, - timeout: Optional[int] = None, - version_id: Optional[str] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - tags: Optional["_models.BlobTags"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param tags: Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_block_blob_operations.py deleted file mode 100644 index 3eb1659..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_block_blob_operations.py +++ /dev/null @@ -1,1138 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class BlockBlobOperations: - """BlockBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def upload( - self, - content_length: int, - body: IO, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Upload Block Blob operation updates the content of an existing block blob. 
Updating an - existing block blob overwrites any existing metadata on the blob. Partial updates are not - supported with Put Blob; the content of the existing blob is overwritten with the content of - the new blob. To perform a partial update of the content of a block blob, use the Put Block - List operation. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "BlockBlob" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = 
self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def put_blob_from_url( - self, - content_length: int, - copy_source: str, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - blob_tags_string: Optional[str] = None, - copy_source_blob_properties: Optional[bool] = None, - copy_source_authorization: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are - read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial - updates are not supported with Put Blob from URL; the content of an existing blob is - overwritten with the content of the new blob. To perform partial updates to a block blob’s - contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. - - :param content_length: The length of the request. - :type content_length: long - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. 
The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param copy_source_blob_properties: Optional, default is true. Indicates if properties from - the source blob should be copied. - :type copy_source_blob_properties: bool - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - blob_type = "BlockBlob" - accept = "application/xml" - - # Construct URL - url = self.put_blob_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if copy_source_blob_properties is not None: - header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def stage_block( - self, - block_id: str, - content_length: int, - body: IO, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - **kwargs: Any - ) -> None: - """The Stage Block 
operation creates a new block to be committed as part of a blob. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "block" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.stage_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if 
transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def stage_block_from_url( - self, - block_id: str, - content_length: int, - source_url: str, - source_range: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - copy_source_authorization: Optional[str] = None, - cpk_info: 
Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Stage Block operation creates a new block to be committed as part of a blob where the - contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "block" - accept = "application/xml" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", 
_encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def commit_block_list( - self, - blocks: "_models.BlockLookupList", - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: 
Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Commit Block List operation writes a blob by specifying the list of block IDs that make up - the blob. In order to be written as part of a blob, a block must have been successfully written - to the server in a prior Put Block operation. You can call Put Block List to update a blob by - uploading only those blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from the committed block list - or from the uncommitted block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. 
- :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.commit_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - 
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_block_list( - self, - snapshot: Optional[str] = None, - list_type: Union[str, "_models.BlockListType"] = "committed", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> "_models.BlockList": - """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a - block blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param list_type: Specifies whether to return the list of committed blocks, the list of - uncommitted blocks, or both lists together. - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlockList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - accept = "application/xml" - - # Construct URL - url = self.get_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlockList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_container_operations.py deleted file mode 100644 index 17f58d5..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_container_operations.py +++ /dev/null @@ -1,1648 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ContainerOperations: - """ContainerOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - access: Optional[Union[str, "_models.PublicAccessType"]] = None, - request_id_parameter: Optional[str] = None, - container_cpk_scope_info: Optional["_models.ContainerCpkScopeInfo"] = None, - **kwargs: Any - ) -> None: - """creates a new container under the specified account. If the container with the same name - already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. 
If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_cpk_scope_info: Parameter group. - :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _default_encryption_scope = None - _prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - _default_encryption_scope = container_cpk_scope_info.default_encryption_scope - _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') - if _prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """returns all user-defined metadata and system properties for the specified container. The data - returned does not include the container's list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) - 
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) - response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) - response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) - response_headers['x-ms-immutable-storage-with-versioning-enabled']=self._deserialize('bool', response.headers.get('x-ms-immutable-storage-with-versioning-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """operation marks the specified container for deletion. The container and any blobs contained - within it are later deleted during garbage collection. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not 
None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """operation sets one or more user-defined name-value pairs for the specified container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - restype = "container" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_access_policy( - self, - timeout: 
Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> List["_models.SignedIdentifier"]: - """gets the permissions for the specified container. The permissions indicate whether container - data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - async def set_access_policy( - self, - timeout: Optional[int] = None, - access: Optional[Union[str, "_models.PublicAccessType"]] = None, - request_id_parameter: Optional[str] = None, - container_acl: Optional[List["_models.SignedIdentifier"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """sets the permissions for the specified container. The permissions indicate whether blobs in a - container may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_acl: the acls for the container. - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - async def restore( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - deleted_container_name: Optional[str] = None, - deleted_container_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of - the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the - version of the deleted container to restore. 
- :type deleted_container_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{containerName}'} # type: ignore - - async def rename( - self, - source_container_name: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - source_lease_id: Optional[str] = None, - **kwargs: Any - ) -> None: - """Renames an existing container. - - :param source_container_name: Required. Specifies the name of the container to rename. - :type source_container_name: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{containerName}'} # type: ignore - - async def submit_batch( - self, - content_length: int, - multipart_content_type: str, - body: IO, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> IO: - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. 
- - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/{containerName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - 
header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - 
release_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - proposed_lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def list_blob_flat_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListBlobsFlatSegmentResponse": - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. 
- :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsFlatSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore - - async def list_blob_hierarchy_segment( - self, - delimiter: str, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListBlobsHierarchySegmentResponse": - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_account_info( - 
self, - **kwargs: Any - ) -> None: - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_directory_operations.py deleted file mode 100644 index 12e49a1..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_directory_operations.py +++ /dev/null @@ -1,742 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
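A minimal sketch (not part of the original sources) of how the two container-level coroutines above could be driven; ``container_ops`` stands for an already-constructed operations instance, and ``next_marker`` is an assumption about the response model, which lives in ``_models`` and is not shown in this diff::

    async def inspect_container(container_ops):
        # Walk the container one virtual-directory level at a time.
        marker = None
        while True:
            segment = await container_ops.list_blob_hierarchy_segment(
                delimiter="/", prefix="logs/", marker=marker, maxresults=100
            )
            # 'next_marker' is assumed here; the actual field is defined in _models.
            marker = getattr(segment, "next_marker", None)
            if not marker:
                break
        # SKU and account kind come back only as response headers, so a cls
        # callback is used to capture them instead of the default None result.
        headers = await container_ops.get_account_info(
            cls=lambda resp, deserialized, hdrs: hdrs
        )
        print(headers["x-ms-sku-name"], headers["x-ms-account-kind"])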
-# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Create a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 
0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - resource = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - 
header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def rename( - self, - rename_source: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - directory_properties: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_umask: Optional[str] = None, - source_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, 
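A minimal usage sketch for the generated ``create`` coroutine above, assuming an already-constructed ``DirectoryOperations`` instance named ``directory_ops`` and a Hierarchical-Namespace-enabled account::

    async def make_directory(directory_ops):
        # x-ms-permissions / x-ms-umask are honoured only when Hierarchical
        # Namespace is enabled; a non-201 status raises HttpResponseError.
        await directory_ops.create(
            timeout=30,
            posix_permissions="rwxr-x---",
            posix_umask="0027",
        )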
- **kwargs: Any - ) -> None: - """Rename a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] 
= self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def delete( - self, - recursive_directory_delete: bool, - timeout: Optional[int] = None, - marker: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted. - If "false" and the directory is non-empty, an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
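A hedged sketch of driving the ``rename`` coroutine above to completion; ``directory_ops`` and the source path are illustrative assumptions, and the loop exists because each pass may return an ``x-ms-continuation`` token that must be fed back in as ``marker``::

    async def rename_directory(directory_ops, source_path):
        # source_path is e.g. "/myfilesystem/old-dir" (an illustrative value).
        marker = None
        while True:
            headers = await directory_ops.rename(
                rename_source=source_path,
                marker=marker,
                cls=lambda resp, deserialized, hdrs: hdrs,
            )
            # Keep going until the service stops returning a continuation token.
            marker = headers.get("x-ms-continuation")
            if not marker:
                break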
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - posix_permissions: Optional[str] = None, - posix_acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Set the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
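The ``delete`` coroutine above follows the same continuation pattern; a minimal sketch, again assuming a ready ``directory_ops`` instance::

    async def remove_directory(directory_ops):
        marker = None
        while True:
            headers = await directory_ops.delete(
                recursive_directory_delete=True,
                marker=marker,
                cls=lambda resp, deserialized, hdrs: hdrs,
            )
            # Large directories are deleted in passes; stop once no token is returned.
            marker = headers.get("x-ms-continuation")
            if not marker:
                break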
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_access_control( - self, - timeout: Optional[int] = None, - upn: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Get the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_page_blob_operations.py deleted file mode 100644 index 06f1755..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_page_blob_operations.py +++ /dev/null @@ -1,1424 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class PageBlobOperations: - """PageBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
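A small sketch of pairing the ``set_access_control`` and ``get_access_control`` coroutines above; the ACL string and the ``directory_ops`` instance are illustrative assumptions::

    async def adjust_directory_acl(directory_ops):
        # Grant the owning group read/execute via a POSIX-style ACL string
        # (illustrative value), then read the effective settings back.
        await directory_ops.set_access_control(
            posix_acl="user::rwx,group::r-x,other::---"
        )
        # Owner, group, and ACL are returned only as response headers,
        # hence the cls callback.
        headers = await directory_ops.get_access_control(
            upn=True, cls=lambda resp, deserialized, hdrs: hdrs
        )
        print(headers["x-ms-owner"], headers["x-ms-group"], headers["x-ms-acl"])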
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - content_length: int, - blob_content_length: int, - timeout: Optional[int] = None, - tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, - metadata: Optional[str] = None, - blob_sequence_number: Optional[int] = 0, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. 
- :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "PageBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 
'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - 
header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def upload_pages( - self, - content_length: int, - body: IO, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Upload Pages operation writes a range of pages to a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. 
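A minimal sketch of the page blob ``create`` coroutine defined above, assuming a ready ``page_blob_ops`` instance; the 512-byte alignment and empty request body follow the docstring and header construction shown::

    async def new_page_blob(page_blob_ops, size_bytes):
        # Page blob sizes must be 512-byte aligned; Create itself sends no body,
        # so Content-Length is 0 while x-ms-blob-content-length carries the size.
        assert size_bytes % 512 == 0
        await page_blob_ops.create(
            content_length=0,
            blob_content_length=size_bytes,
            blob_sequence_number=0,
        )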
- :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "update" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = 
self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def clear_pages( - self, - content_length: int, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
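The ``upload_pages`` operation removed above is the low-level write path for page blobs. As an editorial aside (not part of the diff), here is a minimal sketch of driving it; the ``page_blob_ops`` handle, an instance of this generated async operations class obtained from the generated client, is an assumption for illustration, while the keyword arguments mirror the deleted signature::

    import io

    async def write_first_page(page_blob_ops):
        # Page ranges must be 512-byte aligned; write one page of zeros.
        data = b"\x00" * 512
        await page_blob_ops.upload_pages(
            content_length=len(data),
            body=io.BytesIO(data),
            range="bytes=0-511",
        )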
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "clear" - accept = "application/xml" - - # Construct URL - url = self.clear_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if 
_encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def upload_pages_from_url( - self, - source_url: str, - source_range: str, - content_length: int, - range: str, - source_content_md5: Optional[bytearray] = 
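Likewise for the ``clear_pages`` operation above, a hedged sketch against the same assumed ``page_blob_ops`` handle; a clear sends no request body, so ``content_length`` is zero::

    async def clear_first_page(page_blob_ops):
        # x-ms-page-write: clear carries no payload, hence Content-Length 0.
        await page_blob_ops.clear_pages(
            content_length=0,
            range="bytes=0-511",
        )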
None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - copy_source_authorization: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Upload Pages operation writes a range of pages to a page blob where the contents are read - from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The length of this range - should match the ContentLength header and x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be written. The range should - be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "page" - page_write = "update" - accept = "application/xml" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - 
header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_page_ranges( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> "_models.PageList": - """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot - of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
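For ``upload_pages_from_url`` above, which reads the page contents from a source URL rather than from a request body, a sketch under the same assumptions; the source SAS URL is a placeholder, both ranges must be 512-byte aligned and equal in length, and ``content_length`` is zero because the request itself carries no body::

    async def write_page_from_url(page_blob_ops, source_sas_url):
        # Copy the first 512-byte page from the source blob into this blob.
        await page_blob_ops.upload_pages_from_url(
            source_url=source_sas_url,
            source_range="bytes=0-511",
            content_length=0,
            range="bytes=0-511",
        )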
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_page_ranges_diff( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - prevsnapshot: Optional[str] = None, - prev_snapshot_url: Optional[str] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> "_models.PageList": - """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that - were changed between target blob and previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a - DateTime value that specifies that the response will contain only pages that were changed - between target blob and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is - the older of the two. Note that incremental snapshots are currently supported only for blobs - created on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in service versions - 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The - response will only contain pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
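A sketch of calling the ``get_page_ranges`` operation above; it returns the generated ``PageList`` model, whose ``page_range`` entries carry ``start``/``end`` offsets (attribute names follow the generated models; the ``page_blob_ops`` handle and the range value are assumptions)::

    async def print_valid_ranges(page_blob_ops):
        # Ask for the valid ranges within the first MiB of the blob.
        page_list = await page_blob_ops.get_page_ranges(range="bytes=0-1048575")
        for page_range in page_list.page_range or []:
            print(page_range.start, page_range.end)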
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def resize( - self, - blob_content_length: int, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
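And for the snapshot-diff variant ``get_page_ranges_diff`` above, a sketch that passes ``prevsnapshot`` as the baseline snapshot described in the deleted docstring; the snapshot value and the handle are assumptions::

    async def ranges_changed_since(page_blob_ops, snapshot_time):
        diff = await page_blob_ops.get_page_ranges_diff(prevsnapshot=snapshot_time)
        # Changed pages and cleared pages come back as separate lists.
        return diff.page_range or [], diff.clear_range or []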
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.resize.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
- header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def update_sequence_number( - self, - sequence_number_action: Union[str, "_models.SequenceNumberActionType"], - timeout: Optional[int] = None, - blob_sequence_number: Optional[int] = 0, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the - request. This property applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. - :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
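A sketch of the ``resize`` operation above; the only required argument is the new maximum size, which must be 512-byte aligned per the deleted parameter description (handle assumed as before)::

    async def grow_to_one_mib(page_blob_ops):
        # 1 MiB is a multiple of 512 bytes, so this size is valid.
        await page_blob_ops.resize(blob_content_length=1024 * 1024)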
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.update_sequence_number.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def copy_incremental( - self, - copy_source: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Copy Incremental operation copies a snapshot of the source page blob to a destination page - blob. The snapshot is copied such that only the differential changes between the previously - copied snapshot are transferred to the destination. The copied snapshots are complete copies of - the original snapshot and can be read or copied from as usual. This API is supported since REST - version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
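A sketch of ``update_sequence_number`` above; ``sequence_number_action`` takes the ``SequenceNumberActionType`` values referenced in the signature (``"update"`` is used here), and ``blob_sequence_number`` supplies the new value::

    async def set_sequence_number(page_blob_ops):
        # "update" replaces the blob's sequence number with the given value.
        await page_blob_ops.update_sequence_number(
            sequence_number_action="update",
            blob_sequence_number=7,
        )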
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "incrementalcopy" - accept = "application/xml" - - # Construct URL - url = self.copy_incremental.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_service_operations.py deleted file mode 100644 index 8127498..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,698 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def set_properties( - self, - storage_service_properties: "_models.StorageServiceProperties", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """Sets properties for a storage account's Blob service endpoint, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.StorageServiceProperties": - """gets the properties of a storage account's Blob service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - async def get_statistics( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.StorageServiceStats": - """Retrieves statistics related to replication for the Blob service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('StorageServiceStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/'} # type: ignore - - async def list_containers_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListContainersSegmentResponse": - """The List Containers Segment operation returns a list of the containers under the specified - account. 
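(For reference, and not part of the removed module above: the generated list_containers_segment operation is normally reached through the public async client in azure-storage-blob rather than called directly. A minimal sketch of that usage, with a placeholder account URL and credential, might look like the following.)

    import asyncio
    from azure.storage.blob.aio import BlobServiceClient

    async def list_my_containers():
        # Placeholder endpoint and credential; an account key, SAS token,
        # or a TokenCredential would all be acceptable here.
        service = BlobServiceClient(
            account_url="https://<account>.blob.core.windows.net",
            credential="<sas-token-or-key>",
        )
        async with service:
            # list_containers pages through the service-side listing
            # operation and yields ContainerProperties objects.
            async for container in service.list_containers(
                name_starts_with="logs", include_metadata=True
            ):
                print(container.name, container.metadata)

    asyncio.run(list_my_containers())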
- - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's metadata be returned as - part of the response body. - :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_containers_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not 
None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_containers_segment.metadata = {'url': '/'} # type: ignore - - async def get_user_delegation_key( - self, - key_info: "_models.KeyInfo", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.UserDelegationKey": - """Retrieves a user delegation key for the Blob service. This is only a valid operation when using - bearer token authentication. - - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey, or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "userdelegationkey" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('UserDelegationKey', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} # type: ignore - - async def get_account_info( - self, - **kwargs: Any - ) -> None: - """Returns the sku name and account kind. 
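(Again for reference only: callers typically obtain the SKU name and account kind through the public client's get_account_information helper rather than this generated operation. A minimal sketch under the same placeholder-credential assumption; the dictionary key names shown are those exposed by the public azure-storage-blob client.)

    import asyncio
    from azure.storage.blob.aio import BlobServiceClient

    async def show_account_info():
        async with BlobServiceClient(
            account_url="https://<account>.blob.core.windows.net",
            credential="<sas-token-or-key>",
        ) as service:
            # Surfaces the x-ms-sku-name / x-ms-account-kind response
            # headers described by the operation above.
            info = await service.get_account_information()
            print(info["sku_name"], info["account_kind"])

    asyncio.run(show_account_info())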
- - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/'} # type: ignore - - async def submit_batch( - self, - content_length: int, - multipart_content_type: str, - body: IO, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> IO: - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/'} # type: ignore - - async def filter_blobs( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - where: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - **kwargs: Any - ) -> "_models.FilterBlobSegment": - """The Filter Blobs operation enables 
callers to list blobs across all containers whose tags match - a given search expression. Filter blobs searches across all containers within a storage - account but can be scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/__init__.py deleted file mode 100644 index 3efc69e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/__init__.py +++ /dev/null @@ -1,227 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import AppendPositionAccessConditions - from ._models_py3 import ArrowConfiguration - from ._models_py3 import ArrowField - from ._models_py3 import BlobFlatListSegment - from ._models_py3 import BlobHTTPHeaders - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobMetadata - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import BlobTag - from ._models_py3 import BlobTags - from ._models_py3 import Block - from ._models_py3 import BlockList - from ._models_py3 import BlockLookupList - from ._models_py3 import ClearRange - from ._models_py3 import ContainerCpkScopeInfo - from ._models_py3 import ContainerItem - from ._models_py3 import ContainerProperties - from ._models_py3 import CorsRule - from ._models_py3 import CpkInfo - from ._models_py3 import CpkScopeInfo - from ._models_py3 import DataLakeStorageError - from ._models_py3 import DataLakeStorageErrorError - from ._models_py3 import DelimitedTextConfiguration - from ._models_py3 import DirectoryHttpHeaders - from ._models_py3 import FilterBlobItem - from ._models_py3 import FilterBlobSegment - from ._models_py3 import GeoReplication - from ._models_py3 import JsonTextConfiguration - from ._models_py3 import KeyInfo - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsFlatSegmentResponse - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ListContainersSegmentResponse - from ._models_py3 import Logging - from ._models_py3 import Metrics - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import PageList - from ._models_py3 import PageRange - from ._models_py3 import QueryFormat - from ._models_py3 import QueryRequest - from ._models_py3 import QuerySerialization - from ._models_py3 import RetentionPolicy - from ._models_py3 import SequenceNumberAccessConditions - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StaticWebsite - from ._models_py3 import StorageError - from ._models_py3 import StorageServiceProperties - from ._models_py3 import StorageServiceStats - from ._models_py3 import UserDelegationKey -except (SyntaxError, ImportError): - from ._models import AccessPolicy # type: ignore - from ._models import AppendPositionAccessConditions # type: ignore - from ._models import ArrowConfiguration # type: ignore - from ._models import ArrowField # type: ignore - from ._models import BlobFlatListSegment # type: ignore - from ._models import BlobHTTPHeaders # type: ignore - from ._models import BlobHierarchyListSegment # type: ignore - from ._models import BlobItemInternal # type: ignore - from ._models import BlobMetadata # type: ignore - from ._models import BlobPrefix # type: ignore - from ._models import BlobPropertiesInternal # type: ignore - from ._models import BlobTag # type: ignore - from ._models import BlobTags # type: ignore - from ._models import Block # type: ignore - from ._models import BlockList # type: ignore - from ._models import BlockLookupList # type: ignore - from ._models import ClearRange # type: ignore - from ._models import ContainerCpkScopeInfo # type: ignore - from ._models import ContainerItem # type: ignore - from ._models import ContainerProperties # type: ignore - 
from ._models import CorsRule # type: ignore - from ._models import CpkInfo # type: ignore - from ._models import CpkScopeInfo # type: ignore - from ._models import DataLakeStorageError # type: ignore - from ._models import DataLakeStorageErrorError # type: ignore - from ._models import DelimitedTextConfiguration # type: ignore - from ._models import DirectoryHttpHeaders # type: ignore - from ._models import FilterBlobItem # type: ignore - from ._models import FilterBlobSegment # type: ignore - from ._models import GeoReplication # type: ignore - from ._models import JsonTextConfiguration # type: ignore - from ._models import KeyInfo # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListBlobsFlatSegmentResponse # type: ignore - from ._models import ListBlobsHierarchySegmentResponse # type: ignore - from ._models import ListContainersSegmentResponse # type: ignore - from ._models import Logging # type: ignore - from ._models import Metrics # type: ignore - from ._models import ModifiedAccessConditions # type: ignore - from ._models import PageList # type: ignore - from ._models import PageRange # type: ignore - from ._models import QueryFormat # type: ignore - from ._models import QueryRequest # type: ignore - from ._models import QuerySerialization # type: ignore - from ._models import RetentionPolicy # type: ignore - from ._models import SequenceNumberAccessConditions # type: ignore - from ._models import SignedIdentifier # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StaticWebsite # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageServiceProperties # type: ignore - from ._models import StorageServiceStats # type: ignore - from ._models import UserDelegationKey # type: ignore - -from ._azure_blob_storage_enums import ( - AccessTier, - AccessTierOptional, - AccessTierRequired, - AccountKind, - ArchiveStatus, - BlobExpiryOptions, - BlobImmutabilityPolicyMode, - BlobType, - BlockListType, - CopyStatusType, - DeleteSnapshotsOptionType, - EncryptionAlgorithmType, - GeoReplicationStatusType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListBlobsIncludeItem, - ListContainersIncludeType, - PathRenameMode, - PremiumPageBlobAccessTier, - PublicAccessType, - QueryFormatType, - RehydratePriority, - SequenceNumberActionType, - SkuName, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'AppendPositionAccessConditions', - 'ArrowConfiguration', - 'ArrowField', - 'BlobFlatListSegment', - 'BlobHTTPHeaders', - 'BlobHierarchyListSegment', - 'BlobItemInternal', - 'BlobMetadata', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'BlobTag', - 'BlobTags', - 'Block', - 'BlockList', - 'BlockLookupList', - 'ClearRange', - 'ContainerCpkScopeInfo', - 'ContainerItem', - 'ContainerProperties', - 'CorsRule', - 'CpkInfo', - 'CpkScopeInfo', - 'DataLakeStorageError', - 'DataLakeStorageErrorError', - 'DelimitedTextConfiguration', - 'DirectoryHttpHeaders', - 'FilterBlobItem', - 'FilterBlobSegment', - 'GeoReplication', - 'JsonTextConfiguration', - 'KeyInfo', - 'LeaseAccessConditions', - 'ListBlobsFlatSegmentResponse', - 'ListBlobsHierarchySegmentResponse', - 'ListContainersSegmentResponse', - 'Logging', - 'Metrics', - 'ModifiedAccessConditions', - 'PageList', - 'PageRange', - 'QueryFormat', - 'QueryRequest', - 'QuerySerialization', - 'RetentionPolicy', - 'SequenceNumberAccessConditions', - 'SignedIdentifier', - 'SourceModifiedAccessConditions', - 
'StaticWebsite', - 'StorageError', - 'StorageServiceProperties', - 'StorageServiceStats', - 'UserDelegationKey', - 'AccessTier', - 'AccessTierOptional', - 'AccessTierRequired', - 'AccountKind', - 'ArchiveStatus', - 'BlobExpiryOptions', - 'BlobImmutabilityPolicyMode', - 'BlobType', - 'BlockListType', - 'CopyStatusType', - 'DeleteSnapshotsOptionType', - 'EncryptionAlgorithmType', - 'GeoReplicationStatusType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'ListBlobsIncludeItem', - 'ListContainersIncludeType', - 'PathRenameMode', - 'PremiumPageBlobAccessTier', - 'PublicAccessType', - 'QueryFormatType', - 'RehydratePriority', - 'SequenceNumberActionType', - 'SkuName', - 'StorageErrorCode', -] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_azure_blob_storage_enums.py deleted file mode 100644 index 39eec6b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_azure_blob_storage_enums.py +++ /dev/null @@ -1,350 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. 
- """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccessTierOptional(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccessTierRequired(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccountKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - STORAGE = "Storage" - BLOB_STORAGE = "BlobStorage" - STORAGE_V2 = "StorageV2" - FILE_STORAGE = "FileStorage" - BLOCK_BLOB_STORAGE = "BlockBlobStorage" - -class ArchiveStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot" - REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool" - -class BlobExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NEVER_EXPIRE = "NeverExpire" - RELATIVE_TO_CREATION = "RelativeToCreation" - RELATIVE_TO_NOW = "RelativeToNow" - ABSOLUTE = "Absolute" - -class BlobImmutabilityPolicyMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - UNLOCKED = "Unlocked" - LOCKED = "Locked" - MUTABLE = "Mutable" - -class BlobType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - BLOCK_BLOB = "BlockBlob" - PAGE_BLOB = "PageBlob" - APPEND_BLOB = "AppendBlob" - -class BlockListType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COMMITTED = "committed" - UNCOMMITTED = "uncommitted" - ALL = "all" - -class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - PENDING = "pending" - SUCCESS = "success" - ABORTED = "aborted" - FAILED = "failed" - -class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INCLUDE = "include" - ONLY = "only" - -class EncryptionAlgorithmType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NONE = "None" - AES256 = "AES256" - -class GeoReplicationStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the secondary location - """ - - LIVE = "live" - BOOTSTRAP = "bootstrap" - UNAVAILABLE = "unavailable" - -class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INFINITE = "infinite" - FIXED = "fixed" - -class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - AVAILABLE = "available" - LEASED = "leased" - EXPIRED = "expired" - BREAKING = "breaking" - BROKEN = "broken" - -class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LOCKED = "locked" - UNLOCKED = "unlocked" - -class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COPY = "copy" - DELETED = "deleted" - METADATA = "metadata" - SNAPSHOTS = "snapshots" - UNCOMMITTEDBLOBS = "uncommittedblobs" - VERSIONS = "versions" - TAGS = "tags" - IMMUTABILITYPOLICY = "immutabilitypolicy" - LEGALHOLD = "legalhold" - DELETEDWITHVERSIONS = "deletedwithversions" - -class 
ListContainersIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - METADATA = "metadata" - DELETED = "deleted" - -class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LEGACY = "legacy" - POSIX = "posix" - -class PremiumPageBlobAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - -class PublicAccessType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - CONTAINER = "container" - BLOB = "blob" - -class QueryFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The quick query format type. - """ - - DELIMITED = "delimited" - JSON = "json" - ARROW = "arrow" - PARQUET = "parquet" - -class RehydratePriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """If an object is in rehydrate pending state then this header is returned with priority of - rehydrate. Valid values are High and Standard. - """ - - HIGH = "High" - STANDARD = "Standard" - -class SequenceNumberActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - MAX = "max" - UPDATE = "update" - INCREMENT = "increment" - -class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - STANDARD_LRS = "Standard_LRS" - STANDARD_GRS = "Standard_GRS" - STANDARD_RAGRS = "Standard_RAGRS" - STANDARD_ZRS = "Standard_ZRS" - PREMIUM_LRS = "Premium_LRS" - -class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Error codes returned by the service - """ - - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = "InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = "OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - 
UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" - BLOB_ALREADY_EXISTS = "BlobAlreadyExists" - BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy" - BLOB_NOT_FOUND = "BlobNotFound" - BLOB_OVERWRITTEN = "BlobOverwritten" - BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" - BLOB_USES_CUSTOMER_SPECIFIED_ENCRYPTION = "BlobUsesCustomerSpecifiedEncryption" - BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" - BLOCK_LIST_TOO_LONG = "BlockListTooLong" - CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" - CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" - CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" - CONTAINER_BEING_DELETED = "ContainerBeingDeleted" - CONTAINER_DISABLED = "ContainerDisabled" - CONTAINER_NOT_FOUND = "ContainerNotFound" - CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" - COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" - COPY_ID_MISMATCH = "CopyIdMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" - INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" - INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" - INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" - INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" - INVALID_BLOB_TIER = "InvalidBlobTier" - INVALID_BLOB_TYPE = "InvalidBlobType" - INVALID_BLOCK_ID = "InvalidBlockId" - INVALID_BLOCK_LIST = "InvalidBlockList" - INVALID_OPERATION = "InvalidOperation" - INVALID_PAGE_RANGE = "InvalidPageRange" - INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" - INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" - INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" - LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" - LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" - LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" - LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" - LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" - LEASE_ID_MISSING = "LeaseIdMissing" - LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" - LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" - LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" - LEASE_LOST = "LeaseLost" - LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" - LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" - LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" - MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" - NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" - NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" - OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" - PENDING_COPY_OPERATION = "PendingCopyOperation" - PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" - PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" - PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" - SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" - SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" - 
SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" - SNAPHOT_OPERATION_RATE_EXCEEDED = "SnaphotOperationRateExceeded" - SNAPSHOTS_PRESENT = "SnapshotsPresent" - SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" - SYSTEM_IN_USE = "SystemInUse" - TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" - UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" - BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" - BLOB_ARCHIVED = "BlobArchived" - BLOB_NOT_ARCHIVED = "BlobNotArchived" - AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" - AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" - AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" - AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" - AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models.py deleted file mode 100644 index b8e178d..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models.py +++ /dev/null @@ -1,2055 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: the date-time the policy is active. - :type start: str - :param expiry: the date-time the policy expires. - :type expiry: str - :param permission: the permissions for the acl policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class AppendPositionAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param max_size: Optional conditional header. The max length in bytes permitted for the append - blob. If the Append Block operation would cause the blob to exceed that limit or if the blob - size is already greater than the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will succeed only if the append - position is equal to this number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). 
- :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': 'maxSize', 'type': 'long'}, - 'append_position': {'key': 'appendPosition', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = kwargs.get('max_size', None) - self.append_position = kwargs.get('append_position', None) - - -class ArrowConfiguration(msrest.serialization.Model): - """arrow configuration. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. - :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = kwargs['schema'] - - -class ArrowField(msrest.serialization.Model): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. - :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str'}, - 'name': {'key': 'Name', 'type': 'str'}, - 'precision': {'key': 'Precision', 'type': 'int'}, - 'scale': {'key': 'Scale', 'type': 'int'}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__( - self, - **kwargs - ): - super(ArrowField, self).__init__(**kwargs) - self.type = kwargs['type'] - self.name = kwargs.get('name', None) - self.precision = kwargs.get('precision', None) - self.scale = kwargs.get('scale', None) - - -class BlobFlatListSegment(msrest.serialization.Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = kwargs['blob_items'] - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs['blob_items'] - - -class BlobHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param blob_cache_control: Optional. Sets the blob's cache control. 
If specified, this property - is stored with the blob and returned with a read request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If specified, this property - is stored with the blob and returned with a read request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not - validated, as the hashes for the individual blocks were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. - :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, - 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, - 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, - 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, - 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, - 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = kwargs.get('blob_cache_control', None) - self.blob_content_type = kwargs.get('blob_content_type', None) - self.blob_content_md5 = kwargs.get('blob_content_md5', None) - self.blob_content_encoding = kwargs.get('blob_content_encoding', None) - self.blob_content_language = kwargs.get('blob_content_language', None) - self.blob_content_disposition = kwargs.get('blob_content_disposition', None) - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. - :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: Blob tags. - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param object_replication_metadata: Dictionary of :code:``. 
- :type object_replication_metadata: dict[str, str] - :param has_versions_only: - :type has_versions_only: bool - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, - 'has_versions_only': {'key': 'HasVersionsOnly', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs['deleted'] - self.snapshot = kwargs['snapshot'] - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - self.blob_tags = kwargs.get('blob_tags', None) - self.object_replication_metadata = kwargs.get('object_replication_metadata', None) - self.has_versions_only = kwargs.get('has_versions_only', None) - - -class BlobMetadata(msrest.serialization.Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}'}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__( - self, - **kwargs - ): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = kwargs.get('additional_properties', None) - self.encrypted = kwargs.get('encrypted', None) - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs['name'] - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". 
- :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: "pending", "success", "aborted", "failed". - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", - "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: "rehydrate-pending-to-hot", - "rehydrate-pending-to-cool". - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: If an object is in rehydrate pending state then this header is - returned with priority of rehydrate. Valid values are High and Standard. Possible values - include: "High", "Standard". - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - :param immutability_policy_expires_on: - :type immutability_policy_expires_on: ~datetime.datetime - :param immutability_policy_mode: Possible values include: "Unlocked", "Locked", "Mutable". 
- :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: - :type legal_hold: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'blob_type': {'key': 'BlobType', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'immutability_policy_expires_on': {'key': 'ImmutabilityPolicyUntilDate', 'type': 'rfc-1123'}, - 'immutability_policy_mode': {'key': 'ImmutabilityPolicyMode', 'type': 'str'}, - 'legal_hold': {'key': 'LegalHold', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - 
self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.blob_type = kwargs.get('blob_type', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_status = kwargs.get('copy_status', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.archive_status = kwargs.get('archive_status', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.rehydrate_priority = kwargs.get('rehydrate_priority', None) - self.last_accessed_on = kwargs.get('last_accessed_on', None) - self.immutability_policy_expires_on = kwargs.get('immutability_policy_expires_on', None) - self.immutability_policy_mode = kwargs.get('immutability_policy_mode', None) - self.legal_hold = kwargs.get('legal_hold', None) - - -class BlobTag(msrest.serialization.Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. - :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__( - self, - **kwargs - ): - super(BlobTag, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] - - -class BlobTags(msrest.serialization.Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__( - self, - **kwargs - ): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = kwargs['blob_tag_set'] - - -class Block(msrest.serialization.Model): - """Represents a single block in a block blob. It describes the block's ID and size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. 
- :type size: long - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'size': {'key': 'Size', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(Block, self).__init__(**kwargs) - self.name = kwargs['name'] - self.size = kwargs['size'] - - -class BlockList(msrest.serialization.Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - } - - def __init__( - self, - **kwargs - ): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = kwargs.get('committed_blocks', None) - self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) - - -class BlockLookupList(msrest.serialization.Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__( - self, - **kwargs - ): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = kwargs.get('committed', None) - self.uncommitted = kwargs.get('uncommitted', None) - self.latest = kwargs.get('latest', None) - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class ContainerCpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the - default encryption scope to set on the container and use for all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, - prevents any request from specifying a different encryption scope than the scope set on the - container. 
- :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - - -class ContainerItem(msrest.serialization.Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a container. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__( - self, - **kwargs - ): - super(ContainerItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - - -class ContainerProperties(msrest.serialization.Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: "container", "blob". - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param is_immutable_storage_with_versioning_enabled: Indicates if version level worm is enabled - on this container. 
- :type is_immutable_storage_with_versioning_enabled: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'public_access': {'key': 'PublicAccess', 'type': 'str'}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'is_immutable_storage_with_versioning_enabled': {'key': 'ImmutableStorageWithVersioningEnabled', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.public_access = kwargs.get('public_access', None) - self.has_immutability_policy = kwargs.get('has_immutability_policy', None) - self.has_legal_hold = kwargs.get('has_legal_hold', None) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.is_immutable_storage_with_versioning_enabled = kwargs.get('is_immutable_storage_with_versioning_enabled', None) - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user age sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. 
The maximum amount time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs['allowed_origins'] - self.allowed_methods = kwargs['allowed_methods'] - self.allowed_headers = kwargs['allowed_headers'] - self.exposed_headers = kwargs['exposed_headers'] - self.max_age_in_seconds = kwargs['max_age_in_seconds'] - - -class CpkInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided - if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. Possible values include: "None", "AES256". - :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = kwargs.get('encryption_key', None) - self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) - self.encryption_algorithm = kwargs.get('encryption_algorithm', None) - - -class CpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the - encryption scope to use to encrypt the data provided in the request. If not specified, - encryption is performed with the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = kwargs.get('encryption_scope', None) - - -class DataLakeStorageError(msrest.serialization.Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. 
- :type data_lake_storage_error_details: ~azure.storage.blob.models.DataLakeStorageErrorError - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError'}, - } - - def __init__( - self, - **kwargs - ): - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None) - - -class DataLakeStorageErrorError(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DataLakeStorageErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) - - -class DelimitedTextConfiguration(msrest.serialization.Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator. - :type column_separator: str - :param field_quote: Required. field quote. - :type field_quote: str - :param record_separator: Required. record separator. - :type record_separator: str - :param escape_char: Required. escape char. - :type escape_char: str - :param headers_present: Required. has headers. - :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = kwargs['column_separator'] - self.field_quote = kwargs['field_quote'] - self.record_separator = kwargs['record_separator'] - self.escape_char = kwargs['escape_char'] - self.headers_present = kwargs['headers_present'] - - -class DirectoryHttpHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Cache control for given resource. - :type cache_control: str - :param content_type: Content type for given resource. - :type content_type: str - :param content_encoding: Content encoding for given resource. - :type content_encoding: str - :param content_language: Content language for given resource. - :type content_language: str - :param content_disposition: Content disposition for given resource. 
- :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - - -class FilterBlobItem(msrest.serialization.Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tags: A set of tags. Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'container_name': {'key': 'ContainerName', 'type': 'str'}, - 'tags': {'key': 'Tags', 'type': 'BlobTags'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.container_name = kwargs['container_name'] - self.tags = kwargs.get('tags', None) - - -class FilterBlobSegment(msrest.serialization.Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'where': {'key': 'Where', 'type': 'str'}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.where = kwargs['where'] - self.blobs = kwargs['blobs'] - self.next_marker = kwargs.get('next_marker', None) - - -class GeoReplication(msrest.serialization.Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible values include: "live", - "bootstrap", "unavailable". - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes - preceding this value are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for reads. 
- :type last_sync_time: ~datetime.datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str'}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, - } - - def __init__( - self, - **kwargs - ): - super(GeoReplication, self).__init__(**kwargs) - self.status = kwargs['status'] - self.last_sync_time = kwargs['last_sync_time'] - - -class JsonTextConfiguration(msrest.serialization.Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. record separator. - :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = kwargs['record_separator'] - - -class KeyInfo(msrest.serialization.Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC time. - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(KeyInfo, self).__init__(**kwargs) - self.start = kwargs['start'] - self.expiry = kwargs['expiry'] - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsFlatSegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ListContainersSegmentResponse(msrest.serialization.Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. 
- :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.container_items = kwargs['container_items'] - self.next_marker = kwargs.get('next_marker', None) - - -class Logging(msrest.serialization.Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. Indicates whether all delete requests should be logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be logged. - :type write: bool - :param retention_policy: Required. the retention policy which determines how long the - associated data should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'delete': {'key': 'Delete', 'type': 'bool'}, - 'read': {'key': 'Read', 'type': 'bool'}, - 'write': {'key': 'Write', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Logging, self).__init__(**kwargs) - self.version = kwargs['version'] - self.delete = kwargs['delete'] - self.read = kwargs['read'] - self.write = kwargs['write'] - self.retention_policy = kwargs['retention_policy'] - - -class Metrics(msrest.serialization.Model): - """a summary of request statistics grouped by API in hour or minute aggregates for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: the retention policy which determines how long the associated data - should persist. 
- :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs['enabled'] - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - 'if_tags': {'key': 'ifTags', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_tags = kwargs.get('if_tags', None) - - -class PageList(msrest.serialization.Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, - } - - def __init__( - self, - **kwargs - ): - super(PageList, self).__init__(**kwargs) - self.page_range = kwargs.get('page_range', None) - self.clear_range = kwargs.get('clear_range', None) - - -class PageRange(msrest.serialization.Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__( - self, - **kwargs - ): - super(PageRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class QueryFormat(msrest.serialization.Model): - """QueryFormat. 
- - :param type: The quick query format type. Possible values include: "delimited", "json", - "arrow", "parquet". - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: delimited text configuration. - :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: json text configuration. - :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: arrow configuration. - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - :param parquet_text_configuration: Any object. - :type parquet_text_configuration: any - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, - 'parquet_text_configuration': {'key': 'ParquetTextConfiguration', 'type': 'object'}, - } - - def __init__( - self, - **kwargs - ): - super(QueryFormat, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) - self.json_text_configuration = kwargs.get('json_text_configuration', None) - self.arrow_configuration = kwargs.get('arrow_configuration', None) - self.parquet_text_configuration = kwargs.get('parquet_text_configuration', None) - - -class QueryRequest(msrest.serialization.Model): - """the quick query body. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL". - :vartype query_type: str - :param expression: Required. a query statement. - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__( - self, - **kwargs - ): - super(QueryRequest, self).__init__(**kwargs) - self.expression = kwargs['expression'] - self.input_serialization = kwargs.get('input_serialization', None) - self.output_serialization = kwargs.get('output_serialization', None) - - -class QuerySerialization(msrest.serialization.Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. 
- :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat'}, - } - - def __init__( - self, - **kwargs - ): - super(QuerySerialization, self).__init__(**kwargs) - self.format = kwargs['format'] - - -class RetentionPolicy(msrest.serialization.Model): - """the retention policy which determines how long the associated data should persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the storage - service. - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :type days: int - :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage - account. - :type allow_permanent_delete: bool - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.days = kwargs.get('days', None) - self.allow_permanent_delete = kwargs.get('allow_permanent_delete', None) - - -class SequenceNumberAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a - blob if it has a sequence number less than or equal to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it - has a sequence number less than the specified. - :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it - has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, - 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, - 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None) - self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None) - self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None) - - -class SignedIdentifier(msrest.serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id. - :type id: str - :param access_policy: An Access policy. 
- :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__( - self, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs['id'] - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_tags = kwargs.get('source_if_tags', None) - - -class StaticWebsite(msrest.serialization.Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a static website. - :type enabled: bool - :param index_document: The default name of the index page under each directory. - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page. - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index page. 
- :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'index_document': {'key': 'IndexDocument', 'type': 'str'}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.index_document = kwargs.get('index_document', None) - self.error_document404_path = kwargs.get('error_document404_path', None) - self.default_index_document_path = kwargs.get('default_index_document_path', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage Service Properties. - - :param logging: Azure Analytics Logging settings. - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to the Blob service if - an incoming request's version is not specified. Possible values include version 2008-10-27 and - all more recent versions. - :type default_service_version: str - :param delete_retention_policy: the retention policy which determines how long the associated - data should persist. - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: The properties that enable an account to host a static website. - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging'}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = kwargs.get('logging', None) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.default_service_version = kwargs.get('default_service_version', None) - self.delete_retention_policy = kwargs.get('delete_retention_policy', None) - self.static_website = kwargs.get('static_website', None) - - -class StorageServiceStats(msrest.serialization.Model): - """Stats for the storage service. - - :param geo_replication: Geo-Replication information for the Secondary Storage Service. 
- :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = kwargs.get('geo_replication', None) - - -class UserDelegationKey(msrest.serialization.Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. - :type signed_tid: str - :param signed_start: Required. The date-time the key is active. - :type signed_start: ~datetime.datetime - :param signed_expiry: Required. The date-time the key expires. - :type signed_expiry: ~datetime.datetime - :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the - key. - :type signed_service: str - :param signed_version: Required. The service version that created the key. - :type signed_version: str - :param value: Required. The key as a base64 string. - :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, - 'signed_service': {'key': 'SignedService', 'type': 'str'}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = kwargs['signed_oid'] - self.signed_tid = kwargs['signed_tid'] - self.signed_start = kwargs['signed_start'] - self.signed_expiry = kwargs['signed_expiry'] - self.signed_service = kwargs['signed_service'] - self.signed_version = kwargs['signed_version'] - self.value = kwargs['value'] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models_py3.py deleted file mode 100644 index b3a394e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models_py3.py +++ /dev/null @@ -1,2333 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Any, Dict, List, Optional, Union - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - -from ._azure_blob_storage_enums import * - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: the date-time the policy is active. - :type start: str - :param expiry: the date-time the policy expires. 
- :type expiry: str - :param permission: the permissions for the acl policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - *, - start: Optional[str] = None, - expiry: Optional[str] = None, - permission: Optional[str] = None, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class AppendPositionAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param max_size: Optional conditional header. The max length in bytes permitted for the append - blob. If the Append Block operation would cause the blob to exceed that limit or if the blob - size is already greater than the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will succeed only if the append - position is equal to this number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': 'maxSize', 'type': 'long'}, - 'append_position': {'key': 'appendPosition', 'type': 'long'}, - } - - def __init__( - self, - *, - max_size: Optional[int] = None, - append_position: Optional[int] = None, - **kwargs - ): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = max_size - self.append_position = append_position - - -class ArrowConfiguration(msrest.serialization.Model): - """arrow configuration. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. - :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__( - self, - *, - schema: List["ArrowField"], - **kwargs - ): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = schema - - -class ArrowField(msrest.serialization.Model): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. - :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str'}, - 'name': {'key': 'Name', 'type': 'str'}, - 'precision': {'key': 'Precision', 'type': 'int'}, - 'scale': {'key': 'Scale', 'type': 'int'}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__( - self, - *, - type: str, - name: Optional[str] = None, - precision: Optional[int] = None, - scale: Optional[int] = None, - **kwargs - ): - super(ArrowField, self).__init__(**kwargs) - self.type = type - self.name = name - self.precision = precision - self.scale = scale - - -class BlobFlatListSegment(msrest.serialization.Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. 
- - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - **kwargs - ): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = blob_items - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - blob_prefixes: Optional[List["BlobPrefix"]] = None, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property - is stored with the blob and returned with a read request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If specified, this property - is stored with the blob and returned with a read request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not - validated, as the hashes for the individual blocks were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. 
-    :type blob_content_disposition: str
-    """
-
-    _attribute_map = {
-        'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'},
-        'blob_content_type': {'key': 'blobContentType', 'type': 'str'},
-        'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'},
-        'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'},
-        'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'},
-        'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'},
-    }
-
-    def __init__(
-        self,
-        *,
-        blob_cache_control: Optional[str] = None,
-        blob_content_type: Optional[str] = None,
-        blob_content_md5: Optional[bytearray] = None,
-        blob_content_encoding: Optional[str] = None,
-        blob_content_language: Optional[str] = None,
-        blob_content_disposition: Optional[str] = None,
-        **kwargs
-    ):
-        super(BlobHTTPHeaders, self).__init__(**kwargs)
-        self.blob_cache_control = blob_cache_control
-        self.blob_content_type = blob_content_type
-        self.blob_content_md5 = blob_content_md5
-        self.blob_content_encoding = blob_content_encoding
-        self.blob_content_language = blob_content_language
-        self.blob_content_disposition = blob_content_disposition
-
-
-class BlobItemInternal(msrest.serialization.Model):
-    """An Azure Storage blob.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param name: Required.
-    :type name: str
-    :param deleted: Required.
-    :type deleted: bool
-    :param snapshot: Required.
-    :type snapshot: str
-    :param version_id:
-    :type version_id: str
-    :param is_current_version:
-    :type is_current_version: bool
-    :param properties: Required. Properties of a blob.
-    :type properties: ~azure.storage.blob.models.BlobPropertiesInternal
-    :param metadata:
-    :type metadata: ~azure.storage.blob.models.BlobMetadata
-    :param blob_tags: Blob tags.
-    :type blob_tags: ~azure.storage.blob.models.BlobTags
-    :param object_replication_metadata: Dictionary of :code:`<string>`.
- :type object_replication_metadata: dict[str, str] - :param has_versions_only: - :type has_versions_only: bool - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, - 'has_versions_only': {'key': 'HasVersionsOnly', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - deleted: bool, - snapshot: str, - properties: "BlobPropertiesInternal", - version_id: Optional[str] = None, - is_current_version: Optional[bool] = None, - metadata: Optional["BlobMetadata"] = None, - blob_tags: Optional["BlobTags"] = None, - object_replication_metadata: Optional[Dict[str, str]] = None, - has_versions_only: Optional[bool] = None, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.metadata = metadata - self.blob_tags = blob_tags - self.object_replication_metadata = object_replication_metadata - self.has_versions_only = has_versions_only - - -class BlobMetadata(msrest.serialization.Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}'}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, str]] = None, - encrypted: Optional[str] = None, - **kwargs - ): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.encrypted = encrypted - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. 
- :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: "pending", "success", "aborted", "failed". - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", - "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: "rehydrate-pending-to-hot", - "rehydrate-pending-to-cool". - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: If an object is in rehydrate pending state then this header is - returned with priority of rehydrate. Valid values are High and Standard. Possible values - include: "High", "Standard". - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - :param immutability_policy_expires_on: - :type immutability_policy_expires_on: ~datetime.datetime - :param immutability_policy_mode: Possible values include: "Unlocked", "Locked", "Mutable". 
- :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: - :type legal_hold: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'blob_type': {'key': 'BlobType', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'immutability_policy_expires_on': {'key': 'ImmutabilityPolicyUntilDate', 'type': 'rfc-1123'}, - 'immutability_policy_mode': {'key': 'ImmutabilityPolicyMode', 'type': 'str'}, - 'legal_hold': {'key': 'LegalHold', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - creation_time: Optional[datetime.datetime] = None, - content_length: Optional[int] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_md5: Optional[bytearray] = None, - content_disposition: Optional[str] = None, - cache_control: Optional[str] = None, - blob_sequence_number: Optional[int] = None, - blob_type: Optional[Union[str, "BlobType"]] = None, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - 
lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - copy_id: Optional[str] = None, - copy_status: Optional[Union[str, "CopyStatusType"]] = None, - copy_source: Optional[str] = None, - copy_progress: Optional[str] = None, - copy_completion_time: Optional[datetime.datetime] = None, - copy_status_description: Optional[str] = None, - server_encrypted: Optional[bool] = None, - incremental_copy: Optional[bool] = None, - destination_snapshot: Optional[str] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier: Optional[Union[str, "AccessTier"]] = None, - access_tier_inferred: Optional[bool] = None, - archive_status: Optional[Union[str, "ArchiveStatus"]] = None, - customer_provided_key_sha256: Optional[str] = None, - encryption_scope: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - tag_count: Optional[int] = None, - expires_on: Optional[datetime.datetime] = None, - is_sealed: Optional[bool] = None, - rehydrate_priority: Optional[Union[str, "RehydratePriority"]] = None, - last_accessed_on: Optional[datetime.datetime] = None, - immutability_policy_expires_on: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.blob_type = blob_type - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.copy_id = copy_id - self.copy_status = copy_status - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_inferred = access_tier_inferred - self.archive_status = archive_status - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.rehydrate_priority = rehydrate_priority - self.last_accessed_on = last_accessed_on - self.immutability_policy_expires_on = immutability_policy_expires_on - self.immutability_policy_mode = immutability_policy_mode - self.legal_hold = legal_hold - - -class BlobTag(msrest.serialization.Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. 
- :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__( - self, - *, - key: str, - value: str, - **kwargs - ): - super(BlobTag, self).__init__(**kwargs) - self.key = key - self.value = value - - -class BlobTags(msrest.serialization.Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__( - self, - *, - blob_tag_set: List["BlobTag"], - **kwargs - ): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = blob_tag_set - - -class Block(msrest.serialization.Model): - """Represents a single block in a block blob. It describes the block's ID and size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: long - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'size': {'key': 'Size', 'type': 'long'}, - } - - def __init__( - self, - *, - name: str, - size: int, - **kwargs - ): - super(Block, self).__init__(**kwargs) - self.name = name - self.size = size - - -class BlockList(msrest.serialization.Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - } - - def __init__( - self, - *, - committed_blocks: Optional[List["Block"]] = None, - uncommitted_blocks: Optional[List["Block"]] = None, - **kwargs - ): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = committed_blocks - self.uncommitted_blocks = uncommitted_blocks - - -class BlockLookupList(msrest.serialization.Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__( - self, - *, - committed: Optional[List[str]] = None, - uncommitted: Optional[List[str]] = None, - latest: Optional[List[str]] = None, - **kwargs - ): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = committed - self.uncommitted = uncommitted - self.latest = latest - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. 
- - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class ContainerCpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the - default encryption scope to set on the container and use for all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, - prevents any request from specifying a different encryption scope than the scope set on the - container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, - } - - def __init__( - self, - *, - default_encryption_scope: Optional[str] = None, - prevent_encryption_scope_override: Optional[bool] = None, - **kwargs - ): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - - -class ContainerItem(msrest.serialization.Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a container. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__( - self, - *, - name: str, - properties: "ContainerProperties", - deleted: Optional[bool] = None, - version: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(ContainerItem, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class ContainerProperties(msrest.serialization.Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". 
- :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: "container", "blob". - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param is_immutable_storage_with_versioning_enabled: Indicates if version level worm is enabled - on this container. - :type is_immutable_storage_with_versioning_enabled: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'public_access': {'key': 'PublicAccess', 'type': 'str'}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'is_immutable_storage_with_versioning_enabled': {'key': 'ImmutableStorageWithVersioningEnabled', 'type': 'bool'}, - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - public_access: Optional[Union[str, "PublicAccessType"]] = None, - has_immutability_policy: Optional[bool] = None, - has_legal_hold: Optional[bool] = None, - default_encryption_scope: Optional[str] = None, - prevent_encryption_scope_override: Optional[bool] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - is_immutable_storage_with_versioning_enabled: Optional[bool] = None, - **kwargs - ): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.public_access = public_access - self.has_immutability_policy = has_immutability_policy - self.has_legal_hold = has_legal_hold - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.is_immutable_storage_with_versioning_enabled = is_immutable_storage_with_versioning_enabled - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to 
access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param allowed_origins: Required. The origin domains that are permitted to make a request
-     against the storage service via CORS. The origin domain is the domain from which the request
-     originates. Note that the origin must be an exact case-sensitive match with the origin that the
-     user agent sends to the service. You can also use the wildcard character '*' to allow all origin
-     domains to make requests via CORS.
-    :type allowed_origins: str
-    :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
-     use for a CORS request. (comma separated).
-    :type allowed_methods: str
-    :param allowed_headers: Required. The request headers that the origin domain may specify on the
-     CORS request.
-    :type allowed_headers: str
-    :param exposed_headers: Required. The response headers that may be sent in the response to the
-     CORS request and exposed by the browser to the request issuer.
-    :type exposed_headers: str
-    :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the
-     preflight OPTIONS request.
-    :type max_age_in_seconds: int
-    """
-
-    _validation = {
-        'allowed_origins': {'required': True},
-        'allowed_methods': {'required': True},
-        'allowed_headers': {'required': True},
-        'exposed_headers': {'required': True},
-        'max_age_in_seconds': {'required': True, 'minimum': 0},
-    }
-
-    _attribute_map = {
-        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
-        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
-        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
-        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
-        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
-    }
-
-    def __init__(
-        self,
-        *,
-        allowed_origins: str,
-        allowed_methods: str,
-        allowed_headers: str,
-        exposed_headers: str,
-        max_age_in_seconds: int,
-        **kwargs
-    ):
-        super(CorsRule, self).__init__(**kwargs)
-        self.allowed_origins = allowed_origins
-        self.allowed_methods = allowed_methods
-        self.allowed_headers = allowed_headers
-        self.exposed_headers = exposed_headers
-        self.max_age_in_seconds = max_age_in_seconds
-
-
-class CpkInfo(msrest.serialization.Model):
-    """Parameter group.
-
-    :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data
-     provided in the request. If not specified, encryption is performed with the root account
-     encryption key. For more information, see Encryption at Rest for Azure Storage Services.
-    :type encryption_key: str
-    :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
-     if the x-ms-encryption-key header is provided.
-    :type encryption_key_sha256: str
-    :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently,
-     the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
-     provided. Possible values include: "None", "AES256". 
- :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_key: Optional[str] = None, - encryption_key_sha256: Optional[str] = None, - encryption_algorithm: Optional[Union[str, "EncryptionAlgorithmType"]] = None, - **kwargs - ): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = encryption_key - self.encryption_key_sha256 = encryption_key_sha256 - self.encryption_algorithm = encryption_algorithm - - -class CpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the - encryption scope to use to encrypt the data provided in the request. If not specified, - encryption is performed with the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_scope: Optional[str] = None, - **kwargs - ): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = encryption_scope - - -class DataLakeStorageError(msrest.serialization.Model): - """DataLakeStorageError. - - :param data_lake_storage_error_details: The service error response object. - :type data_lake_storage_error_details: ~azure.storage.blob.models.DataLakeStorageErrorError - """ - - _attribute_map = { - 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError'}, - } - - def __init__( - self, - *, - data_lake_storage_error_details: Optional["DataLakeStorageErrorError"] = None, - **kwargs - ): - super(DataLakeStorageError, self).__init__(**kwargs) - self.data_lake_storage_error_details = data_lake_storage_error_details - - -class DataLakeStorageErrorError(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - code: Optional[str] = None, - message: Optional[str] = None, - **kwargs - ): - super(DataLakeStorageErrorError, self).__init__(**kwargs) - self.code = code - self.message = message - - -class DelimitedTextConfiguration(msrest.serialization.Model): - """delimited text configuration. - - All required parameters must be populated in order to send to Azure. - - :param column_separator: Required. column separator. - :type column_separator: str - :param field_quote: Required. field quote. - :type field_quote: str - :param record_separator: Required. record separator. - :type record_separator: str - :param escape_char: Required. escape char. - :type escape_char: str - :param headers_present: Required. has headers. 
- :type headers_present: bool - """ - - _validation = { - 'column_separator': {'required': True}, - 'field_quote': {'required': True}, - 'record_separator': {'required': True}, - 'escape_char': {'required': True}, - 'headers_present': {'required': True}, - } - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__( - self, - *, - column_separator: str, - field_quote: str, - record_separator: str, - escape_char: str, - headers_present: bool, - **kwargs - ): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = column_separator - self.field_quote = field_quote - self.record_separator = record_separator - self.escape_char = escape_char - self.headers_present = headers_present - - -class DirectoryHttpHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Cache control for given resource. - :type cache_control: str - :param content_type: Content type for given resource. - :type content_type: str - :param content_encoding: Content encoding for given resource. - :type content_encoding: str - :param content_language: Content language for given resource. - :type content_language: str - :param content_disposition: Content disposition for given resource. - :type content_disposition: str - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - cache_control: Optional[str] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_disposition: Optional[str] = None, - **kwargs - ): - super(DirectoryHttpHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - - -class FilterBlobItem(msrest.serialization.Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tags: A set of tags. Blob tags. 
- :type tags: ~azure.storage.blob.models.BlobTags - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'container_name': {'key': 'ContainerName', 'type': 'str'}, - 'tags': {'key': 'Tags', 'type': 'BlobTags'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - container_name: str, - tags: Optional["BlobTags"] = None, - **kwargs - ): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = name - self.container_name = container_name - self.tags = tags - - -class FilterBlobSegment(msrest.serialization.Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'where': {'key': 'Where', 'type': 'str'}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - where: str, - blobs: List["FilterBlobItem"], - next_marker: Optional[str] = None, - **kwargs - ): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.where = where - self.blobs = blobs - self.next_marker = next_marker - - -class GeoReplication(msrest.serialization.Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible values include: "live", - "bootstrap", "unavailable". - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes - preceding this value are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for reads. - :type last_sync_time: ~datetime.datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str'}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, - } - - def __init__( - self, - *, - status: Union[str, "GeoReplicationStatusType"], - last_sync_time: datetime.datetime, - **kwargs - ): - super(GeoReplication, self).__init__(**kwargs) - self.status = status - self.last_sync_time = last_sync_time - - -class JsonTextConfiguration(msrest.serialization.Model): - """json text configuration. - - All required parameters must be populated in order to send to Azure. - - :param record_separator: Required. record separator. 
- :type record_separator: str - """ - - _validation = { - 'record_separator': {'required': True}, - } - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__( - self, - *, - record_separator: str, - **kwargs - ): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = record_separator - - -class KeyInfo(msrest.serialization.Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC time. - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - } - - def __init__( - self, - *, - start: str, - expiry: str, - **kwargs - ): - super(KeyInfo, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsFlatSegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobFlatListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. 
- - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobHierarchyListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - delimiter: Optional[str] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ListContainersSegmentResponse(msrest.serialization.Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. 
- :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_items: List["ContainerItem"], - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.container_items = container_items - self.next_marker = next_marker - - -class Logging(msrest.serialization.Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. Indicates whether all delete requests should be logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be logged. - :type write: bool - :param retention_policy: Required. the retention policy which determines how long the - associated data should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'delete': {'key': 'Delete', 'type': 'bool'}, - 'read': {'key': 'Read', 'type': 'bool'}, - 'write': {'key': 'Write', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - version: str, - delete: bool, - read: bool, - write: bool, - retention_policy: "RetentionPolicy", - **kwargs - ): - super(Logging, self).__init__(**kwargs) - self.version = version - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy - - -class Metrics(msrest.serialization.Model): - """a summary of request statistics grouped by API in hour or minute aggregates for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: the retention policy which determines how long the associated data - should persist. 
- :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - enabled: bool, - version: Optional[str] = None, - include_apis: Optional[bool] = None, - retention_policy: Optional["RetentionPolicy"] = None, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - 'if_tags': {'key': 'ifTags', 'type': 'str'}, - } - - def __init__( - self, - *, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - if_tags: Optional[str] = None, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - self.if_tags = if_tags - - -class PageList(msrest.serialization.Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, - } - - def __init__( - self, - *, - page_range: Optional[List["PageRange"]] = None, - clear_range: Optional[List["ClearRange"]] = None, - **kwargs - ): - super(PageList, self).__init__(**kwargs) - self.page_range = page_range - self.clear_range = clear_range - - -class PageRange(msrest.serialization.Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(PageRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class QueryFormat(msrest.serialization.Model): - """QueryFormat. - - :param type: The quick query format type. Possible values include: "delimited", "json", - "arrow", "parquet". - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: delimited text configuration. - :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: json text configuration. - :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: arrow configuration. - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - :param parquet_text_configuration: Any object. - :type parquet_text_configuration: any - """ - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, - 'parquet_text_configuration': {'key': 'ParquetTextConfiguration', 'type': 'object'}, - } - - def __init__( - self, - *, - type: Optional[Union[str, "QueryFormatType"]] = None, - delimited_text_configuration: Optional["DelimitedTextConfiguration"] = None, - json_text_configuration: Optional["JsonTextConfiguration"] = None, - arrow_configuration: Optional["ArrowConfiguration"] = None, - parquet_text_configuration: Optional[Any] = None, - **kwargs - ): - super(QueryFormat, self).__init__(**kwargs) - self.type = type - self.delimited_text_configuration = delimited_text_configuration - self.json_text_configuration = json_text_configuration - self.arrow_configuration = arrow_configuration - self.parquet_text_configuration = parquet_text_configuration - - -class QueryRequest(msrest.serialization.Model): - """the quick query body. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. the query type. Default value: "SQL". - :vartype query_type: str - :param expression: Required. a query statement. 
- :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__( - self, - *, - expression: str, - input_serialization: Optional["QuerySerialization"] = None, - output_serialization: Optional["QuerySerialization"] = None, - **kwargs - ): - super(QueryRequest, self).__init__(**kwargs) - self.expression = expression - self.input_serialization = input_serialization - self.output_serialization = output_serialization - - -class QuerySerialization(msrest.serialization.Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. - :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat'}, - } - - def __init__( - self, - *, - format: "QueryFormat", - **kwargs - ): - super(QuerySerialization, self).__init__(**kwargs) - self.format = format - - -class RetentionPolicy(msrest.serialization.Model): - """the retention policy which determines how long the associated data should persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the storage - service. - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :type days: int - :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage - account. - :type allow_permanent_delete: bool - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, - } - - def __init__( - self, - *, - enabled: bool, - days: Optional[int] = None, - allow_permanent_delete: Optional[bool] = None, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - self.allow_permanent_delete = allow_permanent_delete - - -class SequenceNumberAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a - blob if it has a sequence number less than or equal to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it - has a sequence number less than the specified. 
- :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it - has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, - 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, - 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, - } - - def __init__( - self, - *, - if_sequence_number_less_than_or_equal_to: Optional[int] = None, - if_sequence_number_less_than: Optional[int] = None, - if_sequence_number_equal_to: Optional[int] = None, - **kwargs - ): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to - self.if_sequence_number_less_than = if_sequence_number_less_than - self.if_sequence_number_equal_to = if_sequence_number_equal_to - - -class SignedIdentifier(msrest.serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id. - :type id: str - :param access_policy: An Access policy. - :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__( - self, - *, - id: str, - access_policy: Optional["AccessPolicy"] = None, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. 
- :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, - } - - def __init__( - self, - *, - source_if_modified_since: Optional[datetime.datetime] = None, - source_if_unmodified_since: Optional[datetime.datetime] = None, - source_if_match: Optional[str] = None, - source_if_none_match: Optional[str] = None, - source_if_tags: Optional[str] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_tags = source_if_tags - - -class StaticWebsite(msrest.serialization.Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a static website. - :type enabled: bool - :param index_document: The default name of the index page under each directory. - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page. - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index page. - :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'index_document': {'key': 'IndexDocument', 'type': 'str'}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, - } - - def __init__( - self, - *, - enabled: bool, - index_document: Optional[str] = None, - error_document404_path: Optional[str] = None, - default_index_document_path: Optional[str] = None, - **kwargs - ): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = enabled - self.index_document = index_document - self.error_document404_path = error_document404_path - self.default_index_document_path = default_index_document_path - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - message: Optional[str] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage Service Properties. - - :param logging: Azure Analytics Logging settings. - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. 
- :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to the Blob service if - an incoming request's version is not specified. Possible values include version 2008-10-27 and - all more recent versions. - :type default_service_version: str - :param delete_retention_policy: the retention policy which determines how long the associated - data should persist. - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: The properties that enable an account to host a static website. - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging'}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, - } - - def __init__( - self, - *, - logging: Optional["Logging"] = None, - hour_metrics: Optional["Metrics"] = None, - minute_metrics: Optional["Metrics"] = None, - cors: Optional[List["CorsRule"]] = None, - default_service_version: Optional[str] = None, - delete_retention_policy: Optional["RetentionPolicy"] = None, - static_website: Optional["StaticWebsite"] = None, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = logging - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.default_service_version = default_service_version - self.delete_retention_policy = delete_retention_policy - self.static_website = static_website - - -class StorageServiceStats(msrest.serialization.Model): - """Stats for the storage service. - - :param geo_replication: Geo-Replication information for the Secondary Storage Service. - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, - } - - def __init__( - self, - *, - geo_replication: Optional["GeoReplication"] = None, - **kwargs - ): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = geo_replication - - -class UserDelegationKey(msrest.serialization.Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. - :type signed_tid: str - :param signed_start: Required. The date-time the key is active. - :type signed_start: ~datetime.datetime - :param signed_expiry: Required. The date-time the key expires. - :type signed_expiry: ~datetime.datetime - :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the - key. - :type signed_service: str - :param signed_version: Required. The service version that created the key. - :type signed_version: str - :param value: Required. The key as a base64 string. 
- :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, - 'signed_service': {'key': 'SignedService', 'type': 'str'}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - - def __init__( - self, - *, - signed_oid: str, - signed_tid: str, - signed_start: datetime.datetime, - signed_expiry: datetime.datetime, - signed_service: str, - signed_version: str, - value: str, - **kwargs - ): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = signed_oid - self.signed_tid = signed_tid - self.signed_start = signed_start - self.signed_expiry = signed_expiry - self.signed_service = signed_service - self.signed_version = signed_version - self.value = value diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/__init__.py deleted file mode 100644 index 62f85c9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._directory_operations import DirectoryOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'DirectoryOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_append_blob_operations.py deleted file mode 100644 index b38af4b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_append_blob_operations.py +++ /dev/null @@ -1,734 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class AppendBlobOperations(object): - """AppendBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - content_length, # type: int - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. 
Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "AppendBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - 
header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def append_block( - self, - content_length, # type: int - body, # type: IO - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Append Block operation commits a new block of data to the end of an existing append blob. - The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _max_size = None - _append_position = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "appendblock" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.append_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) 
- response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def append_block_from_url( - self, - source_url, # type: str - content_length, # type: int - source_range=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - copy_source_authorization=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Append Block operation commits a new block of data to the end of an existing append blob - where the contents are read from a source url. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. 
- :type transactional_content_md5: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _max_size = None - _append_position = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "appendblock" - accept = "application/xml" - - # Construct URL - url = self.append_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': 
self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = 
self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def seal( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on - version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _append_position = None - if append_position_access_conditions is not None: - _append_position = append_position_access_conditions.append_position - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - comp = "seal" - accept = "application/xml" - - # Construct URL - url = self.seal.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_blob_operations.py deleted file mode 100644 index 9b60a81..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_blob_operations.py +++ /dev/null @@ -1,3456 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BlobOperations(object): - """BlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def download( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - range_get_content_md5=None, # type: Optional[bool] - range_get_content_crc64=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Download operation reads or downloads a blob from the system, including its metadata and - properties. You can also call Download to read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param range_get_content_md5: When set to true and specified together with the Range, the - service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified together with the Range, the - service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 - MB in size. - :type range_get_content_crc64: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', 
response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', 
response.headers.get('x-ms-legal-hold')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_properties( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
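The generated ``download`` operation corresponds to ``BlobClient.download_blob`` on the public client; a minimal sketch, assuming placeholder account details::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_blob_url(
        "https://account.blob.core.windows.net/mycontainer/data.bin",
        credential="<account-key-or-sas>",
    )

    # Whole-blob download; pass offset/length for a ranged read, which maps
    # to the x-ms-range header built by the generated operation.
    downloader = blob.download_blob()
    content = downloader.readall()
    print(len(content), downloader.properties.etag)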
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if 
request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) - response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) - response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def delete( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] - request_id_parameter=None, # type: Optional[str] - blob_delete_type="Permanent", # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - permanently removed from the storage account. 
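``get_properties`` maps to ``BlobClient.get_blob_properties``, a HEAD request that returns headers only and never the blob body; a short sketch with placeholder names::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    blob = service.get_blob_client("mycontainer", "data.bin")

    # Metadata, system properties and standard HTTP properties, no content.
    props = blob.get_blob_properties()
    print(props.size, props.etag, props.last_modified, props.blob_type)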
If the storage account's soft delete feature is - enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for the number of days - specified by the DeleteRetentionPolicy section of [Storage service properties] - (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's - data is permanently removed from the storage account. Note that you continue to be charged for - the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and - specify the "include=deleted" query parameter to discover which blobs and snapshots have been - soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other - operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code - of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the - following two options: include: Delete the base blob and all of its snapshots. only: Delete - only the blob's snapshots and not the blob itself. - :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to - permanently delete a blob if blob soft delete is enabled. - :type blob_delete_type: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if blob_delete_type is not None: - query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: 
- map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
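``delete`` is reached through ``BlobClient.delete_blob``; a sketch that also removes the blob's snapshots, again with placeholder names::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    blob = service.get_blob_client("mycontainer", "data.bin")

    # delete_snapshots="include" maps to x-ms-delete-snapshots: include,
    # deleting the base blob together with all of its snapshots.
    blob.delete_blob(delete_snapshots="include")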
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_access_control( - self, - timeout=None, # type: Optional[int] - upn=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get the owner, group, permissions, or access control list for a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
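``set_access_control`` targets ADLS Gen2 (hierarchical-namespace) accounts and is normally consumed through the ``azure.storage.filedatalake`` client rather than this blob-generated layer; a sketch under that assumption, with placeholder account, filesystem, and path names::

    from azure.storage.filedatalake import DataLakeServiceClient

    # Requires a hierarchical-namespace (ADLS Gen2) enabled account.
    service = DataLakeServiceClient(
        account_url="https://account.dfs.core.windows.net",
        credential="<account-key>",
    )
    file_client = (
        service.get_file_system_client("myfilesystem")
        .get_file_client("dir/data.csv")
    )

    # Set the owner and symbolic POSIX permissions on the path.
    file_client.set_access_control(owner="<owner-object-id>", permissions="rwxr-x---")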
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def rename( - self, - rename_source, # type: str - timeout=None, # type: Optional[int] - path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Rename a blob/file. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. 
This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', 
skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", 
request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def undelete( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
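The rename operation above is what rename_file and rename_directory on the Data Lake clients call: the source path travels in x-ms-rename-source while the request is issued against the destination URL. A minimal sketch, assuming a hierarchical-namespace account and placeholder names::

    from azure.storage.filedatalake import DataLakeServiceClient

    service = DataLakeServiceClient(
        account_url="https://<account>.dfs.core.windows.net", credential="<account-key>"
    )
    file_client = service.get_file_system_client("myfilesystem").get_file_client("old/name.txt")

    # Sends x-ms-rename-source: /myfilesystem/old/name.txt against the new path's URL.
    # The destination is overwritten by default; pass etag/match_condition keyword
    # arguments to make the rename conditional (for example, fail if it already exists).
    renamed = file_client.rename_file("myfilesystem/new/name.txt")
    print(renamed.path_name)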
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_expiry( - self, - expiry_options, # type: Union[str, "_models.BlobExpiryOptions"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - expires_on=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/xml" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_http_headers( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
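Soft-delete recovery and blob expiry surface differently at the client layer: undelete is a method on the blob client, while the expiry action roughly backs set_file_expiry on the Data Lake file client (expiry is only honoured on hierarchical-namespace accounts). A rough sketch, assuming the usual public clients and placeholder connection details::

    import datetime
    from azure.storage.blob import BlobClient
    from azure.storage.filedatalake import DataLakeFileClient

    blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "report.csv")
    blob.undelete_blob()  # PUT ...?comp=undelete, restores a soft-deleted blob

    file_client = DataLakeFileClient.from_connection_string(
        "<connection-string>", file_system_name="myfilesystem", file_path="tmp/scratch.txt"
    )
    # PUT ...?comp=expiry, sent as x-ms-expiry-option / x-ms-expiry-time
    file_client.set_file_expiry("Absolute", expires_on=datetime.datetime(2030, 1, 1))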
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_md5 = None - _blob_content_encoding = None - _blob_content_language = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _blob_content_disposition = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_disposition = blob_http_headers.blob_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language 
is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_immutability_policy( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Immutability Policy operation sets the immutability policy on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
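The blob_http_headers parameter group above corresponds to ContentSettings on the hand-written BlobClient, which is the usual way to drive this comp=properties call. A small sketch with placeholder connection details::

    from azure.storage.blob import BlobClient, ContentSettings

    blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "page.html")

    # PUT ...?comp=properties; each ContentSettings field becomes one of the
    # x-ms-blob-content-* headers assembled above.
    blob.set_http_headers(content_settings=ContentSettings(
        content_type="text/html",
        cache_control="max-age=3600",
        content_language="en-US",
    ))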
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "immutabilityPolicies" - accept = "application/xml" - - # Construct URL - url = self.set_immutability_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def delete_immutability_policy( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Delete Immutability Policy operation deletes the immutability policy on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "immutabilityPolicies" - accept = "application/xml" - - # Construct URL - url = self.delete_immutability_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_legal_hold( - self, - legal_hold, # type: bool - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Legal Hold operation sets a legal hold on the blob. - - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "legalhold" - accept = "application/xml" - - # Construct URL - url = self.set_legal_hold.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_legal_hold.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or - more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
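The three operations just defined (setting an immutability policy, deleting it, and setting a legal hold) map onto version-level WORM controls, and recent azure-storage-blob releases expose them on the hand-written BlobClient. A sketch assuming version-level immutability is enabled on the container, with placeholder connection details::

    import datetime
    from azure.storage.blob import BlobClient, ImmutabilityPolicy

    blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "audit.log")

    # PUT ...?comp=immutabilityPolicies with x-ms-immutability-policy-until-date / -mode
    blob.set_immutability_policy(ImmutabilityPolicy(
        expiry_time=datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7),
        policy_mode="Unlocked",
    ))

    blob.set_legal_hold(True)            # PUT ...?comp=legalhold with x-ms-legal-hold
    blob.delete_immutability_policy()    # DELETE ...?comp=immutabilityPolicies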
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 
'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
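Set Blob Metadata is the comp=metadata call defined above; on the hand-written client it takes a plain dict. A small sketch with placeholder values::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "data.bin")

    # PUT ...?comp=metadata; each pair travels as an x-ms-meta-<name> header and the
    # new set replaces whatever metadata the blob already had.
    blob.set_blob_metadata({"project": "multiapi", "stage": "draft"})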
- :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - 
raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - proposed_lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def create_snapshot( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if 
_lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def start_copy_from_url( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - seal_blob=None, # type: Optional[bool] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Start Copy From URL operation copies a blob or an internet resource to a new blob. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. Service version - 2019-12-12 and newer. - :type seal_blob: bool - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - accept = "application/xml" - - # Construct URL - url = self.start_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def copy_from_url( - self, - copy_source, # type: 
str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - copy_source_authorization=None, # type: Optional[str] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not - return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - x_ms_requires_sync = "true" - accept = "application/xml" - - # Construct URL - url = self.copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _if_modified_since is not None: - 
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - 
response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def abort_copy_from_url( - self, - copy_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a - destination blob with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_tier( - self, - tier, # type: Union[str, "_models.AccessTierRequired"] - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a - premium storage account and on a block blob in a blob storage account (locally redundant - storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not - update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tier" - accept = "application/xml" - - # Construct URL - url = self.set_tier.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if response.status_code == 202: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_account_info( - self, - **kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def query( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - query_request=None, # type: Optional["_models.QueryRequest"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Query operation enables users to select/project on blob data by providing simple query - expressions. 
- - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param query_request: the query request. - :type query_request: ~azure.storage.blob.models.QueryRequest - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "query" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.query.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - 
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', 
response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_tags( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.BlobTags" - """The Get Tags operation enables users to get the tags associated with a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlobTags, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - accept = "application/xml" - - # Construct URL - url = self.get_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlobTags', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_tags( - self, - timeout=None, # type: Optional[int] - version_id=None, # type: Optional[str] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - tags=None, # type: Optional["_models.BlobTags"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param tags: Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_block_blob_operations.py deleted file mode 100644 index 20a4f54..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_block_blob_operations.py +++ /dev/null @@ -1,1148 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BlockBlobOperations(object): - """BlockBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def upload( - self, - content_length, # type: int - body, # type: IO - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Block Blob operation updates the content of an existing block blob. Updating an - existing block blob overwrites any existing metadata on the blob. Partial updates are not - supported with Put Blob; the content of the existing blob is overwritten with the content of - the new blob. To perform a partial update of the content of a block blob, use the Put Block - List operation. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. 
- :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "BlockBlob" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - 
header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not 
None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def put_blob_from_url( - self, - content_length, # type: int - copy_source, # type: str - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - blob_tags_string=None, # type: Optional[str] - copy_source_blob_properties=None, # type: Optional[bool] - copy_source_authorization=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) 
-> None - """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are - read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial - updates are not supported with Put Blob from URL; the content of an existing blob is - overwritten with the content of the new blob. To perform partial updates to a block blob’s - contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. - - :param content_length: The length of the request. - :type content_length: long - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param copy_source_blob_properties: Optional, default is true. Indicates if properties from - the source blob should be copied. - :type copy_source_blob_properties: bool - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - blob_type = "BlockBlob" - accept = "application/xml" - - # Construct URL - url = self.put_blob_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if copy_source_blob_properties is not None: - header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def stage_block( - self, - block_id, # type: str - content_length, # type: int - body, # type: IO - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - **kwargs # 
type: Any - ): - # type: (...) -> None - """The Stage Block operation creates a new block to be committed as part of a blob. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "block" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.stage_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", 
transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def stage_block_from_url( - self, - block_id, # type: str - content_length, # type: int - source_url, # type: str - source_range=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: 
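The ``stage_block`` operation above roughly corresponds to ``BlobClient.stage_block`` in the public client layer (not part of this generated module). A minimal sketch with placeholder credentials and data::

    import uuid
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="big.bin")
    block_id = str(uuid.uuid4())  # client layer handles block-id encoding
    # Uploads one uncommitted block; nothing is visible until a block list is committed.
    blob.stage_block(block_id=block_id, data=b"chunk-1")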
Optional[str] - copy_source_authorization=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Stage Block operation creates a new block to be committed as part of a blob where the - contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "block" - accept = "application/xml" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", 
_encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def commit_block_list( - self, - blocks, # type: "_models.BlockLookupList" - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: 
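Similarly, ``stage_block_from_url`` is surfaced as ``BlobClient.stage_block_from_url``, which stages a block by copying a byte range from a readable source URL instead of from a local payload. Sketch with placeholder names and URL::

    import uuid
    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="assembled.bin")
    dest.stage_block_from_url(
        block_id=str(uuid.uuid4()),
        source_url="https://account.blob.core.windows.net/src/source.bin",
        source_offset=0,
        source_length=4 * 1024 * 1024)  # stage the first 4 MiB of the source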
Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Commit Block List operation writes a blob by specifying the list of block IDs that make up - the blob. In order to be written as part of a blob, a block must have been successfully written - to the server in a prior Put Block operation. You can call Put Block List to update a blob by - uploading only those blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from the committed block list - or from the uncommitted block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.commit_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = 
self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - 
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_block_list( - self, - snapshot=None, # type: Optional[str] - list_type="committed", # type: Union[str, "_models.BlockListType"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.BlockList" - """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a - block blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param list_type: Specifies whether to return the list of committed blocks, the list of - uncommitted blocks, or both lists together. - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
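``commit_block_list`` is the step that actually writes the blob from previously staged blocks; in the public client layer it is ``BlobClient.commit_block_list``, which takes a list of ``BlobBlock``. Sketch, continuing the staging example (ids are placeholders)::

    from azure.storage.blob import BlobBlock, BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="big.bin")
    staged_ids = ["<id-1>", "<id-2>"]  # ids previously passed to stage_block
    blob.commit_block_list([BlobBlock(block_id=bid) for bid in staged_ids])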
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlockList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - accept = "application/xml" - - # Construct URL - url = self.get_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', 
response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlockList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_container_operations.py deleted file mode 100644 index 1fdd911..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_container_operations.py +++ /dev/null @@ -1,1669 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ContainerOperations(object): - """ContainerOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] - request_id_parameter=None, # type: Optional[str] - container_cpk_scope_info=None, # type: Optional["_models.ContainerCpkScopeInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """creates a new container under the specified account. If the container with the same name - already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. 
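``get_block_list`` maps to ``BlobClient.get_block_list``, which returns the committed and uncommitted block lists as a pair. Sketch with placeholder names::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="demo", blob_name="big.bin")
    committed, uncommitted = blob.get_block_list("all")
    print(len(committed), "committed,", len(uncommitted), "uncommitted blocks")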
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_cpk_scope_info: Parameter group. - :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _default_encryption_scope = None - _prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - _default_encryption_scope = container_cpk_scope_info.default_encryption_scope - _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') - if _prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """returns all user-defined metadata and system properties for the specified container. The data - returned does not include the container's list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
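The container ``create`` operation above is exposed in the client layer as ``BlobServiceClient.create_container`` (or ``ContainerClient.create_container``). Minimal sketch with placeholder values::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    # Fails with a 409 if a container of the same name already exists.
    container = service.create_container("demo", metadata={"owner": "cli"})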
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) - 
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) - response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) - response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) - response_headers['x-ms-immutable-storage-with-versioning-enabled']=self._deserialize('bool', response.headers.get('x-ms-immutable-storage-with-versioning-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}'} # type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """operation marks the specified container for deletion. The container and any blobs contained - within it are later deleted during garbage collection. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 
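``get_properties`` corresponds to ``ContainerClient.get_container_properties``; the response headers deserialized above (lease state, public access, immutability flags) come back as attributes of the returned ``ContainerProperties``. Sketch with placeholders::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string("<connection-string>", "demo")
    props = container.get_container_properties()
    print(props.last_modified, props.metadata, props.lease.state)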
'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """operation sets one or more user-defined name-value pairs for the specified container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
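Container ``delete`` is ``ContainerClient.delete_container`` in the client layer; as the docstring above notes, the container is only marked for deletion and is removed later by garbage collection. Sketch::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string("<connection-string>", "demo")
    container.delete_container()  # returns None; the service answers 202 Accepted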
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - restype = "container" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}'} # type: ignore - - def get_access_policy( - self, - timeout=None, # type: 
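``set_metadata`` maps to ``ContainerClient.set_container_metadata``; note that it replaces the container's metadata wholesale rather than merging with what is already stored. Sketch with placeholder values::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string("<connection-string>", "demo")
    container.set_container_metadata({"env": "test", "team": "storage"})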
Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> List["_models.SignedIdentifier"] - """gets the permissions for the specified container. The permissions indicate whether container - data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - def set_access_policy( - self, - timeout=None, # type: Optional[int] - access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] - request_id_parameter=None, # type: Optional[str] - container_acl=None, # type: Optional[List["_models.SignedIdentifier"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """sets the permissions for the specified container. The permissions indicate whether blobs in a - container may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_acl: the acls for the container. - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
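The container_acl parameter above serializes to the SignedIdentifiers XML document defined by the Set Container ACL REST operation (restype=container&comp=acl). A minimal sketch of that payload, with placeholder policy values and not taken from the generated serializer:

import xml.etree.ElementTree as ET

def build_signed_identifiers(policies):
    # Build the <SignedIdentifiers> body; each entry is (id, start, expiry, permission).
    root = ET.Element("SignedIdentifiers")
    for policy_id, start, expiry, permission in policies:
        identifier = ET.SubElement(root, "SignedIdentifier")
        ET.SubElement(identifier, "Id").text = policy_id
        access_policy = ET.SubElement(identifier, "AccessPolicy")
        ET.SubElement(access_policy, "Start").text = start
        ET.SubElement(access_policy, "Expiry").text = expiry
        ET.SubElement(access_policy, "Permission").text = permission
    return ET.tostring(root, encoding="utf-8")

# One stored access policy granting read + list for one day (placeholder values).
body = build_signed_identifiers(
    [("read-list-policy", "2021-01-01T00:00:00Z", "2021-01-02T00:00:00Z", "rl")]
)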
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - def restore( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - deleted_container_name=None, # type: Optional[str] - deleted_container_version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of - the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the - version of the deleted container to restore. 
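For reference, the two optional parameters above map onto the soft-delete restore request (comp=undelete) as headers; a minimal sketch of that header set, with placeholder container name, version identifier, and service version:

headers = {
    "x-ms-version": "2020-10-02",                           # placeholder service version
    "x-ms-deleted-container-name": "mycontainer",           # placeholder container name
    "x-ms-deleted-container-version": "01D60F8BB59A4652",   # placeholder opaque version id
}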
- :type deleted_container_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{containerName}'} # type: ignore - - def rename( - self, - source_container_name, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Renames an existing container. - - :param source_container_name: Required. Specifies the name of the container to rename. - :type source_container_name: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{containerName}'} # type: ignore - - def submit_batch( - self, - content_length, # type: int - multipart_content_type, # type: str - body, # type: IO - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> IO - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - 
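As a point of reference for the multipart_content_type parameter above, the batch boundary is chosen by the caller; a minimal sketch of composing that header value (the GUID is generated locally, not prescribed by the service):

import uuid

# The service requires "multipart/mixed" with a client-supplied batch boundary.
batch_boundary = "batch_" + str(uuid.uuid4())
multipart_content_type = "multipart/mixed; boundary=" + batch_boundary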
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/{containerName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
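The request this operation assembles can also be issued directly against the REST endpoint; a hedged sketch using the requests library against a placeholder container SAS URL (account, container, SAS token, and service version are illustrative assumptions):

import requests
from email.utils import formatdate

container_url = "https://myaccount.blob.core.windows.net/mycontainer"  # placeholder
sas_token = "sv=2020-10-02&sig=..."  # placeholder SAS query string

response = requests.put(
    container_url + "?restype=container&comp=lease&" + sas_token,
    headers={
        "x-ms-version": "2020-10-02",
        "x-ms-date": formatdate(usegmt=True),
        "x-ms-lease-action": "acquire",
        "x-ms-lease-duration": "-1",  # -1 = infinite; 15-60 seconds for a fixed-duration lease
    },
)
response.raise_for_status()
lease_id = response.headers["x-ms-lease-id"]  # returned on 201 Created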
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - proposed_lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def list_blob_flat_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsFlatSegmentResponse" - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
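The marker/NextMarker contract described above is what drives paging; a minimal sketch, assuming `container` is an already-constructed instance of this operations class and that attribute names follow the generated ListBlobsFlatSegmentResponse model:

marker = None
while True:
    page = container.list_blob_flat_segment(marker=marker, maxresults=1000)
    for blob in page.segment.blob_items:
        print(blob.name)
    marker = page.next_marker
    if not marker:  # an empty NextMarker means the listing is complete
        break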
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsFlatSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore - - def list_blob_hierarchy_segment( - self, - delimiter, # type: str - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - 
maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsHierarchySegmentResponse" - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
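With a delimiter of "/", the response groups blob names into BlobPrefix entries that behave like virtual directories; a short sketch under the same assumptions as the flat-listing example above (placeholder prefix, generated model attribute names assumed):

page = container.list_blob_hierarchy_segment(delimiter="/", prefix="photos/")
for directory in page.segment.blob_prefixes or []:
    print("prefix:", directory.name)
for blob in page.segment.blob_items or []:
    print("blob:", blob.name)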
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore - - def get_account_info( - self, - 
**kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_directory_operations.py deleted file mode 100644 index 0ebb32d..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,751 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Create a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. 
If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - resource = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", 
_cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def rename( - self, - rename_source, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - directory_properties=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_umask=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] - 
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Rename a directory. By default, the destination is overwritten and if the destination already - exists and has a lease the lease is broken. This operation supports conditional HTTP requests. - For more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param rename_source: The file or directory to be renamed. The value must have the following - format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will - overwrite the existing properties; otherwise, the existing properties will be preserved. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param path_rename_mode: Determines the behavior of the rename operation. - :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode - :param directory_properties: Optional. User-defined properties to be stored with the file or - directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", - where each value is base64 encoded. - :type directory_properties: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask - restricts permission settings for file and directory, and will only be applied when default Acl - does not exist in parent directory. If the umask bit has set, it means that the corresponding - permission will be disabled. Otherwise the corresponding permission will be determined by the - permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, - a default umask - 0027 will be used. - :type posix_umask: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param directory_http_headers: Parameter group. - :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_type = None - _content_encoding = None - _content_language = None - _content_disposition = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if directory_http_headers is not None: - _cache_control = directory_http_headers.cache_control - _content_type = directory_http_headers.content_type - _content_encoding = directory_http_headers.content_encoding - _content_language = directory_http_headers.content_language - _content_disposition = directory_http_headers.content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - if path_rename_mode is not None: - query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if directory_properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_umask is not None: - 
header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def delete( - self, - recursive_directory_delete, # type: bool - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Deletes the directory. - - :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted. - If "false" and the directory is non-empty, an error occurs. - :type recursive_directory_delete: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param marker: When renaming a directory, the number of paths that are renamed with each - invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation - token is returned in this response header. When a continuation token is returned in the - response, it must be specified in a subsequent invocation of the rename operation to continue - renaming the directory. - :type marker: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') - if marker is not None: - query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - posix_permissions=None, # type: Optional[str] - posix_acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type posix_permissions: str - :param posix_acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type posix_acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/xml" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if posix_permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') - if posix_acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_access_control( - self, - timeout=None, # type: Optional[int] - upn=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get the owner, group, permissions, or access control list for a directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. - :type upn: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "getAccessControl" - accept = "application/xml" - - # Construct URL - url = self.get_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_page_blob_operations.py deleted file mode 100644 index c953df2..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_page_blob_operations.py +++ /dev/null @@ -1,1437 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class PageBlobOperations(object): - """PageBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - content_length, # type: int - blob_content_length, # type: int - timeout=None, # type: Optional[int] - tier=None, # type: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] - metadata=None, # type: Optional[str] - blob_sequence_number=0, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. 
- :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "PageBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _blob_content_type is not None: - 
header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = 
self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def upload_pages( - self, - content_length, # type: int - body, # type: IO - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Pages operation writes a range of pages to a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "update" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} 
# type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not 
in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def clear_pages( - self, - content_length, # type: int - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
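A minimal sketch of exercising the Upload Pages and Clear Pages operations shown above through the public azure-storage-blob v12 client, which wraps this generated layer; the connection string, container, and blob names are placeholders::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="pages", blob_name="disk.vhd")
    blob.create_page_blob(size=1024 * 1024)                   # page blob sizes are 512-byte aligned
    blob.upload_page(b"\x00" * 4096, offset=0, length=4096)   # comp=page, x-ms-page-write=update
    blob.clear_page(offset=0, length=4096)                    # comp=page, x-ms-page-write=clear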
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "clear" - accept = "application/xml" - - # Construct URL - url = self.clear_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if 
_encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def upload_pages_from_url( - self, - source_url, # type: str - source_range, # type: str - content_length, # type: int - range, # type: str - source_content_md5=None, # 
type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - copy_source_authorization=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Pages operation writes a range of pages to a page blob where the contents are read - from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The length of this range - should match the ContentLength header and x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be written. The range should - be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
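Assuming a destination ``BlobClient`` named ``dest`` and a source page blob URL that is readable (public or carrying a SAS token), the same Upload Pages From URL operation is reachable through the public v12 client roughly as::

    dest.upload_pages_from_url(
        source_url="https://<account>.blob.core.windows.net/pages/src.vhd?<sas>",
        offset=0,           # destination offset; ranges must be 512-byte aligned
        length=4096,
        source_offset=0)    # offset into the copy source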
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "page" - page_write = "update" - accept = "application/xml" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - 
header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_page_ranges( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.PageList" - """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot - of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
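Through the public v12 client (a ``BlobClient`` named ``blob`` is assumed), the valid page ranges come back as a ``(page_ranges, cleared_ranges)`` pair, each a list of dicts with start/end byte offsets::

    ranges, cleared = blob.get_page_ranges()
    for r in ranges:
        print("valid:", r["start"], "-", r["end"])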
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_page_ranges_diff( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - prevsnapshot=None, # type: Optional[str] - prev_snapshot_url=None, # type: Optional[str] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.PageList" - """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that - were changed between target blob and previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a - DateTime value that specifies that the response will contain only pages that were changed - between target blob and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is - the older of the two. Note that incremental snapshots are currently supported only for blobs - created on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in service versions - 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The - response will only contain pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
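A sketch of the diff variant via the public v12 client: take a snapshot, write more pages, then ask for only the ranges changed since that snapshot (``blob`` is an assumed ``BlobClient``)::

    snap = blob.create_snapshot()
    blob.upload_page(b"\xff" * 512, offset=0, length=512)
    changed, cleared = blob.get_page_ranges(previous_snapshot_diff=snap)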
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def resize( - self, - blob_content_length, # type: int - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
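The equivalent public v12 call for the Resize operation is a one-liner; the new size must be a multiple of 512 bytes (``blob`` is an assumed ``BlobClient``)::

    blob.resize_blob(2 * 1024 * 1024)   # grow the page blob to 2 MiB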
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.resize.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
- header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def update_sequence_number( - self, - sequence_number_action, # type: Union[str, "_models.SequenceNumberActionType"] - timeout=None, # type: Optional[int] - blob_sequence_number=0, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the - request. This property applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. - :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
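Via the public v12 client the sequence number action is one of ``"max"``, ``"update"``, or ``"increment"``; ``"increment"`` takes no explicit number (``blob`` is an assumed ``BlobClient``)::

    blob.set_sequence_number("update", 7)   # x-ms-sequence-number-action: update
    blob.set_sequence_number("increment")   # bump the current value by one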
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.update_sequence_number.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def copy_incremental( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Copy Incremental operation copies a snapshot of the source page blob to a destination page - blob. The snapshot is copied such that only the differential changes between the previously - copied snapshot are transferred to the destination. The copied snapshots are complete copies of - the original snapshot and can be read or copied from as usual. This API is supported since REST - version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
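The public v12 client exposes incremental copy through ``start_copy_from_url`` with ``incremental_copy=True``; the source must be a snapshot URL of a page blob that is public or authorized with a SAS (``dest`` is an assumed ``BlobClient``, and the URL below is a placeholder)::

    props = dest.start_copy_from_url(
        "https://<account>.blob.core.windows.net/pages/src.vhd?snapshot=<ts>&<sas>",
        incremental_copy=True)
    print(props["copy_id"], props["copy_status"])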
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "incrementalcopy" - accept = "application/xml" - - # Construct URL - url = self.copy_incremental.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_service_operations.py deleted file mode 100644 index 1292561..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_service_operations.py +++ /dev/null @@ -1,710 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def set_properties( - self, - storage_service_properties, # type: "_models.StorageServiceProperties" - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for a storage account's Blob service endpoint, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceProperties" - """gets the properties of a storage account's Blob service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. 
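``set_properties`` and ``get_properties`` surface on ``BlobServiceClient`` as ``set_service_properties`` and ``get_service_properties``. A minimal sketch of round-tripping the service configuration, assuming the upstream ``azure-storage-blob`` API and a placeholder connection string:

.. code-block:: python

    from azure.storage.blob import BlobServiceClient, CorsRule

    service = BlobServiceClient.from_connection_string("<connection-string>")

    # Read the current Blob service configuration (analytics, CORS, static website, ...).
    props = service.get_service_properties()
    print(props["cors"])

    # Add a CORS rule and write it back; this drives the
    # restype=service&comp=properties PUT shown in set_properties above.
    rule = CorsRule(allowed_origins=["https://example.com"], allowed_methods=["GET"])
    service.set_service_properties(cors=[rule])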
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - def get_statistics( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceStats" - """Retrieves statistics related to replication for the Blob service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The timeout parameter is expressed in seconds. 
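``get_statistics`` surfaces as ``BlobServiceClient.get_service_stats`` and only succeeds against the secondary endpoint of an account with read-access geo-redundant replication enabled. A brief sketch, assuming the upstream API and a placeholder connection string:

.. code-block:: python

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")

    # Routed to the secondary endpoint; fails if RA-GRS is not enabled on the account.
    stats = service.get_service_stats()
    geo = stats["geo_replication"]
    print(geo["status"], geo["last_sync_time"])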
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('StorageServiceStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/'} # type: ignore - - def list_containers_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> "_models.ListContainersSegmentResponse" - """The List Containers Segment operation returns a list of the containers under the specified - account. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's metadata be returned as - part of the response body. - :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
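``list_containers_segment`` backs ``BlobServiceClient.list_containers``, which performs the marker-based paging described above on the caller's behalf. A short sketch with placeholder values, assuming the upstream API:

.. code-block:: python

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")

    # The iterator requests comp=list pages on demand, passing prefix/marker/maxresults.
    for container in service.list_containers(name_starts_with="logs-", include_metadata=True):
        print(container.name, container.metadata)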
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_containers_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_containers_segment.metadata = {'url': '/'} # type: ignore - - def get_user_delegation_key( - self, - key_info, # type: "_models.KeyInfo" - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.UserDelegationKey" - """Retrieves a user delegation key for the Blob service. This is only a valid operation when using - bearer token authentication. 
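A user delegation key is typically requested through ``BlobServiceClient.get_user_delegation_key`` with an Azure AD token credential and then used to sign a user delegation SAS. A sketch under those assumptions (``azure-identity`` supplies the credential; the account, container, and blob names are placeholders):

.. code-block:: python

    from datetime import datetime, timedelta

    from azure.identity import DefaultAzureCredential
    from azure.storage.blob import BlobSasPermissions, BlobServiceClient, generate_blob_sas

    # Bearer token authentication is required for userdelegationkey requests.
    service = BlobServiceClient(
        "https://<account>.blob.core.windows.net", credential=DefaultAzureCredential()
    )

    start = datetime.utcnow()
    key = service.get_user_delegation_key(
        key_start_time=start, key_expiry_time=start + timedelta(hours=1)
    )

    # The returned key signs a user delegation SAS for a single blob.
    sas = generate_blob_sas(
        account_name="<account>",
        container_name="data",
        blob_name="report.csv",
        user_delegation_key=key,
        permission=BlobSasPermissions(read=True),
        expiry=start + timedelta(hours=1),
    )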
- - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey, or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "userdelegationkey" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('UserDelegationKey', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_user_delegation_key.metadata 
= {'url': '/'} # type: ignore - - def get_account_info( - self, - **kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/'} # type: ignore - - def submit_batch( - self, - content_length, # type: int - multipart_content_type, # type: str - body, # type: IO - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. 
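``get_account_info`` maps to ``BlobServiceClient.get_account_information``; the SKU and account kind come back as response headers, as the header deserialization above shows. A minimal sketch with a placeholder connection string:

.. code-block:: python

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")

    # restype=account&comp=properties GET; no request body, headers only.
    info = service.get_account_information()
    print(info["sku_name"], info["account_kind"])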
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/'} # type: ignore - - def filter_blobs( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - where=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: 
Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.FilterBlobSegment" - """The Filter Blobs operation enables callers to list blobs across all containers whose tags match - a given search expression. Filter blobs searches across all containers within a storage - account but can be scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. 
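``filter_blobs`` backs ``BlobServiceClient.find_blobs_by_tags``, which takes the tag query expression described by the ``where`` parameter above. A short sketch with placeholder values, assuming the upstream API:

.. code-block:: python

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")

    # The expression uses the blob tag query syntax; results page via marker/maxresults.
    for blob in service.find_blobs_by_tags("\"project\"='alpha'"):
        print(blob.container_name, blob.name, blob.tags)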
- :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_lease.py b/azure/multiapi/storagev2/blob/v2020_10_02/_lease.py deleted file mode 100644 index d495d6e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_lease.py +++ /dev/null @@ -1,331 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._serialize import get_modify_conditions - -if TYPE_CHECKING: - from datetime import datetime - - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(object): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.BlobClient or - ~azure.storage.blob.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'blob_name'): - self._client = client._client.blob # type: ignore # pylint: disable=protected-access - elif hasattr(client, 'container_name'): - self._client = client._client.container # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use either BlobClient or ContainerClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
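``BlobLeaseClient`` is usually obtained from ``BlobClient.acquire_lease`` (or ``ContainerClient.acquire_lease``) rather than constructed directly, and the ``__enter__``/``__exit__`` pair above lets it release the lease when used as a context manager. A sketch with placeholder names, assuming the upstream API:

.. code-block:: python

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="data", blob_name="report.csv"
    )

    # lease_duration may be 15-60 seconds, or -1 for an infinite lease.
    with blob.acquire_lease(lease_duration=15) as lease:
        # Pass the lease so writes against the leased blob are authorized.
        blob.set_blob_metadata({"locked_by": "nightly-job"}, lease=lease)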
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_10_02/_list_blobs_helper.py deleted file mode 100644 index 309d37b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_list_blobs_helper.py +++ /dev/null @@ -1,236 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.paging import PageIterator, ItemPaged -from azure.core.exceptions import HttpResponseError -from ._deserialize import get_blob_properties_from_generated_code, parse_tags -from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix, FilterBlobItem -from ._models import BlobProperties, FilteredBlob -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
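``BlobPropertiesPaged`` and ``BlobPrefix`` are the page iterators behind ``ContainerClient.list_blobs`` and ``ContainerClient.walk_blobs``. A short sketch with placeholder names, assuming the upstream API:

.. code-block:: python

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string(
        "<connection-string>", container_name="data"
    )

    # Flat listing: each item is a BlobProperties built by BlobPropertiesPaged.
    for blob in container.list_blobs(name_starts_with="2021/"):
        print(blob.name, blob.size)

    # Hierarchical listing: BlobPrefix items act as virtual directories.
    for item in container.walk_blobs(name_starts_with="2021/", delimiter="/"):
        print(item.name)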
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - def _extract_data_cb(self, get_next_return): - continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class BlobPrefix(ItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str next_marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. 
- :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class FilteredBlobPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.FilteredBlob) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - tags = parse_tags(item.tags) - blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_models.py b/azure/multiapi/storagev2/blob/v2020_10_02/_models.py deleted file mode 100644 index 68c3b4d..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_models.py +++ /dev/null @@ -1,1209 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._generated.models import ArrowField - -from ._shared import decode_base64_to_bytes -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Logging as GeneratedLogging -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import StaticWebsite as GeneratedStaticWebsite -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy - - -class BlobType(str, Enum): - - BlockBlob = "BlockBlob" - PageBlob = "PageBlob" - AppendBlob = "AppendBlob" - - -class BlockState(str, Enum): - """Block blob block types.""" - - Committed = 'Committed' #: Committed blocks. - Latest = 'Latest' #: Latest blocks. - Uncommitted = 'Uncommitted' #: Uncommitted blocks. - - -class StandardBlobTier(str, Enum): - """ - Specifies the blob tier to set the blob to. This is only applicable for - block blobs on standard storage accounts. - """ - - Archive = 'Archive' #: Archive - Cool = 'Cool' #: Cool - Hot = 'Hot' #: Hot - - -class PremiumPageBlobTier(str, Enum): - """ - Specifies the page blob tier to set the blob to. 
This is only applicable to page - blobs on premium storage accounts. Please take a look at: - https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughput per PageBlobTier. - """ - - P4 = 'P4' #: P4 Tier - P6 = 'P6' #: P6 Tier - P10 = 'P10' #: P10 Tier - P20 = 'P20' #: P20 Tier - P30 = 'P30' #: P30 Tier - P40 = 'P40' #: P40 Tier - P50 = 'P50' #: P50 Tier - P60 = 'P60' #: P60 Tier - - -class QuickQueryDialect(str, Enum): - """Specifies the quick query input/output dialect.""" - - DelimitedText = 'DelimitedTextDialect' - DelimitedJson = 'DelimitedJsonDialect' - Parquet = 'ParquetDialect' - - -class SequenceNumberAction(str, Enum): - """Sequence number actions.""" - - Increment = 'increment' - """ - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - """ - - Max = 'max' - """ - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - """ - - Update = 'update' - """Sets the sequence number to the value included with the request.""" - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the container may be accessed publicly and the level of access. - """ - - OFF = 'off' - """ - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - """ - - Blob = 'blob' - """ - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - """ - - Container = 'container' - """ - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - """ - - -class BlobImmutabilityPolicyMode(str, Enum): - """ - Specifies the immutability policy mode to set on the blob. - "Mutable" can only be returned by service, don't set to "Mutable". - """ - - Unlocked = "Unlocked" - Locked = "Locked" - Mutable = "Mutable" - - -class BlobAnalyticsLogging(GeneratedLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. 
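All of the enums above mix ``str`` into ``Enum``, so callers can pass either the member or the raw service value; a small self-contained illustration of that pattern (the class body repeats ``PublicAccess`` purely for the example)::

    from enum import Enum

    class PublicAccess(str, Enum):
        OFF = 'off'
        Blob = 'blob'
        Container = 'container'

    # str-mixin members compare equal to, and concatenate like, their wire
    # values, so they can be placed directly into request headers.
    assert PublicAccess.Blob == 'blob'
    assert 'x-ms-blob-public-access: ' + PublicAccess.Container == 'x-ms-blob-public-access: container'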
- """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Blob service. - The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GeneratedStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. 
- """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ContainerProperties(DictMixin): - """Blob container's properties class. - - Returned ``ContainerProperties`` instances expose these values through a - dictionary interface, for example: ``container_props["last_modified"]``. - Additionally, the container name is available as ``container_props["name"]``. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the container. 
- :ivar str public_access: Specifies whether data in the container may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the container has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the container has a legal hold. - :ivar bool immutable_storage_with_versioning_enabled: - Represents whether immutable storage with versioning enabled on the container. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :ivar dict metadata: A dict with name-value pairs to associate with the - container as metadata. - :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope: - The default encryption scope configuration for the container. - :ivar bool deleted: - Whether this container was deleted. - :ivar str version: - The version of a deleted container. - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.lease = LeaseProperties(**kwargs) - self.public_access = kwargs.get('x-ms-blob-public-access') - self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') - self.deleted = None - self.version = None - self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') - self.metadata = kwargs.get('metadata') - self.encryption_scope = None - self.immutable_storage_with_versioning_enabled = kwargs.get('x-ms-immutable-storage-with-versioning-enabled') - default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') - if default_encryption_scope: - self.encryption_scope = ContainerEncryptionScope( - default_encryption_scope=default_encryption_scope, - prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) - ) - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = generated.properties.public_access - props.has_immutability_policy = generated.properties.has_immutability_policy - props.immutable_storage_with_versioning_enabled = \ - generated.properties.is_immutable_storage_with_versioning_enabled - props.deleted = generated.deleted - props.version = generated.version - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access - return props - - -class ContainerPropertiesPaged(PageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. 
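Because ``ContainerProperties`` derives from ``DictMixin``, listed containers can be read attribute-style or key-style, as the docstring above notes; a short sketch, assuming ``list_containers`` from the matching service client::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")

    # list_containers returns an ItemPaged of ContainerProperties, driven by
    # ContainerPropertiesPaged underneath.
    for props in service.list_containers(include_metadata=True):
        print(props.name, props["last_modified"], props.metadata)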
- :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class ImmutabilityPolicy(DictMixin): - """Optional parameters for setting the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime expiry_time: - Specifies the date time when the blobs immutability policy is set to expire. - :keyword str or ~azure.storage.blob.BlobImmutabilityPolicyMode policy_mode: - Specifies the immutability policy mode to set on the blob. - Possible values to set include: "Locked", "Unlocked". - "Mutable" can only be returned by service, don't set to "Mutable". - """ - - def __init__(self, **kwargs): - self.expiry_time = kwargs.pop('expiry_time', None) - self.policy_mode = kwargs.pop('policy_mode', None) - - @classmethod - def _from_generated(cls, generated): - immutability_policy = cls() - immutability_policy.expiry_time = generated.properties.immutability_policy_expires_on - immutability_policy.policy_mode = generated.properties.immutability_policy_mode - return immutability_policy - - -class BlobProperties(DictMixin): - """ - Blob Properties. - - :ivar str name: - The name of the blob. - :ivar str container: - The container in which the blob resides. - :ivar str snapshot: - Datetime value that uniquely identifies the blob snapshot. - :ivar ~azure.blob.storage.BlobType blob_type: - String indicating this blob's type. - :ivar dict metadata: - Name-value pairs associated with the blob as metadata. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - The size of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. 
- :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar bool is_append_blob_sealed: - Indicate if the append blob is sealed or not. - - .. versionadded:: 12.4.0 - - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar ~azure.storage.blob.StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. - :ivar str rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :ivar ~datetime.datetime blob_tier_change_time: - Indicates when the access tier was last changed. - :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar bool deleted: - Whether this blob was deleted. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - :ivar ~datetime.datetime creation_time: - Indicates when the blob was created, in UTC. - :ivar str archive_status: - Archive status of blob. - :ivar str encryption_key_sha256: - The SHA-256 hash of the provided encryption key. - :ivar str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :ivar bool request_server_encrypted: - Whether this blob is encrypted. - :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: - Only present for blobs that have policy ids and rule ids applied to them. - - .. versionadded:: 12.4.0 - - :ivar str object_replication_destination_policy: - Represents the Object Replication Policy Id that created this blob. - - .. versionadded:: 12.4.0 - - :ivar ~datetime.datetime last_accessed_on: - Indicates when the last Read/Write operation was performed on a Blob. - - .. versionadded:: 12.6.0 - - :ivar int tag_count: - Tags count on this blob. - - .. versionadded:: 12.4.0 - - :ivar dict(str, str) tags: - Key value pair of tags on this blob. - - .. versionadded:: 12.4.0 - :ivar bool has_versions_only: - A true value indicates the root blob is deleted - - .. 
versionadded:: 12.10.0 - - :ivar ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :ivar bool has_legal_hold: - Specified if a legal hold should be set on the blob. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.container = None - self.snapshot = kwargs.get('x-ms-snapshot') - self.version_id = kwargs.get('x-ms-version-id') - self.is_current_version = kwargs.get('x-ms-is-current-version') - self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None - self.metadata = kwargs.get('metadata') - self.encrypted_metadata = kwargs.get('encrypted_metadata') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') - self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') - self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.blob_tier = kwargs.get('x-ms-access-tier') - self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') - self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') - self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') - self.deleted = False - self.deleted_time = None - self.remaining_retention_days = None - self.creation_time = kwargs.get('x-ms-creation-time') - self.archive_status = kwargs.get('x-ms-archive-status') - self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') - self.encryption_scope = kwargs.get('x-ms-encryption-scope') - self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') - self.object_replication_source_properties = kwargs.get('object_replication_source_properties') - self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') - self.last_accessed_on = kwargs.get('x-ms-last-access-time') - self.tag_count = kwargs.get('x-ms-tag-count') - self.tags = None - self.immutability_policy = ImmutabilityPolicy(expiry_time=kwargs.get('x-ms-immutability-policy-until-date'), - policy_mode=kwargs.get('x-ms-immutability-policy-mode')) - self.has_legal_hold = kwargs.get('x-ms-legal-hold') - self.has_versions_only = None - - -class FilteredBlob(DictMixin): - """Blob info from a Filter Blobs API call. - - :ivar name: Blob name - :type name: str - :ivar container_name: Container name. - :type container_name: str - :ivar tags: Key value pairs of blob tags. - :type tags: Dict[str, str] - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name', None) - self.container_name = kwargs.get('container_name', None) - self.tags = kwargs.get('tags', None) - - -class LeaseProperties(DictMixin): - """Blob Lease Properties. - - :ivar str status: - The lease status of the blob. Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. 
Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """The content settings of a blob. - - :param str content_type: - The content type specified for the blob. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :param str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :param bytearray content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class CopyProperties(DictMixin): - """Blob Copy Properties. - - These properties will be `None` if this blob has never been the destination - in a Copy Blob operation, or if this blob has been modified after a concluded - Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. 
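``ContentSettings`` is usually supplied at upload time rather than rebuilt from response headers; a minimal sketch under the assumption that ``upload_blob`` accepts the ``content_settings`` keyword as in the public client API::

    from azure.storage.blob import BlobClient, ContentSettings

    blob = BlobClient.from_connection_string("<connection-string>", "<container>", "<blob>")
    blob.upload_blob(
        b'{"hello": "world"}',
        overwrite=True,
        content_settings=ContentSettings(content_type="application/json",
                                         cache_control="max-age=3600"),
    )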
- :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar ~datetime.datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar ~datetime.datetime destination_snapshot: - Included if the blob is incremental copy blob or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this blob. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class BlobBlock(DictMixin): - """BlockBlob Block class. - - :param str block_id: - Block id. - :param str state: - Block state. Possible values: committed|uncommitted - :ivar int size: - Block size in bytes. - """ - - def __init__(self, block_id, state=BlockState.Latest): - self.id = block_id - self.state = state - self.size = None - - @classmethod - def _from_generated(cls, generated): - try: - decoded_bytes = decode_base64_to_bytes(generated.name) - block_id = decoded_bytes.decode('utf-8') - # this is to fix a bug. When large blocks are uploaded through upload_blob the block id isn't base64 encoded - # while service expected block id is base64 encoded, so when we get block_id if we cannot base64 decode, it - # means we didn't base64 encode it when stage the block, we want to use the returned block_id directly. 
- except UnicodeDecodeError: - block_id = generated.name - block = cls(block_id) - block.size = generated.size - return block - - -class PageRange(DictMixin): - """Page Range for page blob. - - :param int start: - Start of page range in bytes. - :param int end: - End of page range in bytes. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class ContainerSasPermissions(object): - """ContainerSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_container_sas` function and - for the AccessPolicies used with - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. 
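``AccessPolicy`` and the SAS permission classes meet in ``set_container_access_policy``; a sketch of registering a stored access policy, assuming the container client behaves as in the standalone package::

    from datetime import datetime, timedelta, timezone
    from azure.storage.blob import AccessPolicy, ContainerClient, ContainerSasPermissions

    container = ContainerClient.from_connection_string("<connection-string>", "<container>")

    policy = AccessPolicy(
        permission=ContainerSasPermissions.from_string("rl"),   # read + list
        start=datetime.now(timezone.utc),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    )
    # A SAS token can later reference the identifier instead of carrying the
    # permission, start and expiry fields itself.
    container.set_container_access_policy(signed_identifiers={"read-only": policy})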
Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - List blobs in the container. - :param bool tag: - Set or get tags on the blobs in the container. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. - """ - def __init__(self, read=False, write=False, delete=False, - list=False, delete_previous_version=False, tag=False, **kwargs): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self.delete_previous_version = delete_previous_version - self.tag = tag - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('t' if self.tag else '') + - ('i' if self.set_immutability_policy else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ContainerSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, delete, - and list permissions. - :return: A ContainerSasPermissions object - :rtype: ~azure.storage.blob.ContainerSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - p_set_immutability_policy = 'i' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, - delete_previous_version=p_delete_previous_version, tag=p_tag, - set_immutability_policy=p_set_immutability_policy) - - return parsed - - -class BlobSasPermissions(object): - """BlobSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_blob_sas` function. - - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool tag: - Set or get tags on the blob. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. 
- To get immutability policy, you just need read permission. - """ - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, delete_previous_version=False, tag=True, **kwargs): - self.read = read - self.add = add - self.create = create - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.tag = tag - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('t' if self.tag else '') + - ('i' if self.set_immutability_policy else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a BlobSasPermissions from a string. - - To specify read, add, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. - :return: A BlobSasPermissions object - :rtype: ~azure.storage.blob.BlobSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_tag = 't' in permission - p_set_immutability_policy = 'i' in permission - - parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, - delete_previous_version=p_delete_previous_version, tag=p_tag, - set_immutability_policy=p_set_immutability_policy) - - return parsed - - -class CustomerProvidedEncryptionKey(object): - """ - All data in Azure Storage is encrypted at-rest using an account-level encryption key. - In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents - and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. - - When you use a customer-provided key, Azure Storage does not manage or persist your key. - When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. - A SHA-256 hash of the encryption key is written alongside the blob contents, - and is used to verify that all subsequent operations against the blob use the same encryption key. - This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. - When reading a blob, the provided key is used to decrypt your data after reading it from disk. - In both cases, the provided encryption key is securely discarded - as soon as the encryption or decryption process completes. - - :param str key_value: - Base64-encoded AES-256 encryption key value. - :param str key_hash: - Base64-encoded SHA256 of the encryption key. - :ivar str algorithm: - Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - """ - def __init__(self, key_value, key_hash): - self.key_value = key_value - self.key_hash = key_hash - self.algorithm = 'AES256' - - -class ContainerEncryptionScope(object): - """The default encryption scope configuration for a container. - - This scope is used implicitly for all future writes within the container, - but can be overridden per blob operation. - - .. 
versionadded:: 12.2.0 - - :param str default_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - :param bool prevent_encryption_scope_override: - If true, prevents any request from specifying a different encryption scope than the scope - set on the container. Default value is false. - """ - - def __init__(self, default_encryption_scope, **kwargs): - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) - - @classmethod - def _from_generated(cls, generated): - if generated.properties.default_encryption_scope: - scope = cls( - generated.properties.default_encryption_scope, - prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False - ) - return scope - return None - - -class DelimitedJsonDialect(DictMixin): - """Defines the input or output JSON serialization for a blob data query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', '\n') - - -class DelimitedTextDialect(DictMixin): - """Defines the input or output delimited (CSV) serialization for a blob query request. - - :keyword str delimiter: - Column separator, defaults to ','. - :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', ',') - self.quotechar = kwargs.pop('quotechar', '"') - self.lineterminator = kwargs.pop('lineterminator', '\n') - self.escapechar = kwargs.pop('escapechar', "") - self.has_header = kwargs.pop('has_header', False) - - -class ArrowDialect(ArrowField): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param ~azure.storage.blob.ArrowType type: Arrow field type. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. - """ - def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin - super(ArrowDialect, self).__init__(type=type, **kwargs) - - -class ArrowType(str, Enum): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class ObjectReplicationPolicy(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str policy_id: - Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. - :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: - Within each policy there may be multiple replication rules. - e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 - """ - - def __init__(self, **kwargs): - self.policy_id = kwargs.pop('policy_id', None) - self.rules = kwargs.pop('rules', None) - - -class ObjectReplicationRule(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str rule_id: - Rule id. - :ivar str status: - The status of the rule. 
It could be "Complete" or "Failed" - """ - - def __init__(self, **kwargs): - self.rule_id = kwargs.pop('rule_id', None) - self.status = kwargs.pop('status', None) - - -class BlobQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2020_10_02/_quick_query_helper.py deleted file mode 100644 index 3164337..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_quick_query_helper.py +++ /dev/null @@ -1,195 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from io import BytesIO -from typing import Union, Iterable, IO # pylint: disable=unused-import - -from ._shared.avro.datafile import DataFileReader -from ._shared.avro.avro_io import DatumReader - - -class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. 
- """ - - def __init__( - self, - name=None, - container=None, - errors=None, - record_delimiter='\n', - encoding=None, - headers=None, - response=None, - error_cls=None, - ): - self.name = name - self.container = container - self.response_headers = headers - self.record_delimiter = record_delimiter - self._size = 0 - self._bytes_processed = 0 - self._errors = errors - self._encoding = encoding - self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) - self._first_result = self._process_record(next(self._parsed_results)) - self._error_cls = error_cls - - def __len__(self): - return self._size - - def _process_record(self, result): - self._size = result.get('totalBytes', self._size) - self._bytes_processed = result.get('bytesScanned', self._bytes_processed) - if 'data' in result: - return result.get('data') - if 'fatal' in result: - error = self._error_cls( - error=result['name'], - is_fatal=result['fatal'], - description=result['description'], - position=result['position'] - ) - if self._errors: - self._errors(error) - return None - - def _iter_stream(self): - if self._first_result is not None: - yield self._first_result - for next_result in self._parsed_results: - processed_result = self._process_record(next_result) - if processed_result is not None: - yield processed_result - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - for record in self._iter_stream(): - stream.write(record) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - delimiter = self.record_delimiter.encode('utf-8') - for record_chunk in self._iter_stream(): - for record in record_chunk.split(delimiter): - if self._encoding: - yield record.decode(self._encoding) - else: - yield record - - -class QuickQueryStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator): - self.generator = generator - self.iterator = iter(generator) - self._buf = b"" - self._point = 0 - self._download_offset = 0 - self._buf_start = 0 - self.file_length = None - - def __len__(self): - return self.file_length - - def __iter__(self): - return self.iterator - - @staticmethod - def seekable(): - return True - - def __next__(self): - next_part = next(self.iterator) - self._download_offset += len(next_part) - return next_part - - next = __next__ # Python 2 compatibility. - - def tell(self): - return self._point - - def seek(self, offset, whence=0): - if whence == 0: - self._point = offset - elif whence == 1: - self._point += offset - else: - raise ValueError("whence must be 0, or 1") - if self._point < 0: - self._point = 0 # XXX is this right? 
- - def read(self, size): - try: - # keep reading from the generator until the buffer of this stream has enough data to read - while self._point + size > self._download_offset: - self._buf += self.__next__() - except StopIteration: - self.file_length = self._download_offset - - start_point = self._point - - # EOF - self._point = min(self._point + size, self._download_offset) - - relative_start = start_point - self._buf_start - if relative_start < 0: - raise ValueError("Buffer has dumped too much data") - relative_end = relative_start + size - data = self._buf[relative_start: relative_end] - - # dump the extra data in buffer - # buffer start--------------------16bytes----current read position - dumped_size = max(relative_end - 16 - relative_start, 0) - self._buf_start += dumped_size - self._buf = self._buf[dumped_size:] - - return data diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_serialize.py b/azure/multiapi/storagev2/blob/v2020_10_02/_serialize.py deleted file mode 100644 index d44c5ad..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_serialize.py +++ /dev/null @@ -1,205 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -try: - from urllib.parse import quote -except ImportError: - from urllib2 import quote # type: ignore - -from azure.core import MatchConditions - -from ._models import ( - ContainerEncryptionScope, - DelimitedJsonDialect) -from ._generated.models import ( - ModifiedAccessConditions, - SourceModifiedAccessConditions, - CpkScopeInfo, - ContainerCpkScopeInfo, - QueryFormat, - QuerySerialization, - DelimitedTextConfiguration, - JsonTextConfiguration, - ArrowConfiguration, - QueryFormatType, - BlobTag, - BlobTags, LeaseAccessConditions -) - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08', - '2020-06-12', - '2020-08-04', - '2020-10-02' -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (Dict[str, Any], str, str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if kwargs.get(etag_param): - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def 
get_modify_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None), - if_tags=kwargs.pop('if_tags_match_condition', None) - ) - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), - source_if_tags=kwargs.pop('source_if_tags_match_condition', None) - ) - - -def get_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> CpkScopeInfo - if 'encryption_scope' in kwargs: - return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) - return None - - -def get_container_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> ContainerCpkScopeInfo - encryption_scope = kwargs.pop('container_encryption_scope', None) - if encryption_scope: - if isinstance(encryption_scope, ContainerEncryptionScope): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope.default_encryption_scope, - prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override - ) - if isinstance(encryption_scope, dict): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope['default_encryption_scope'], - prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') - ) - raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") - return None - - -def get_api_version(kwargs): - # type: (Dict[str, Any], str) -> str - api_version = kwargs.get('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) - return api_version or _SUPPORTED_API_VERSIONS[-1] - - -def serialize_blob_tags_header(tags=None): - # type: (Optional[Dict[str, str]]) -> str - if tags is None: - return None - - components = list() - if tags: - for key, value in tags.items(): - components.append(quote(key, safe='.-')) - components.append('=') - components.append(quote(value, safe='.-')) - components.append('&') - - if components: - del components[-1] - - return ''.join(components) - - -def serialize_blob_tags(tags=None): - # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] - tag_list = list() - if tags: - tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] - return BlobTags(blob_tag_set=tag_list) - - -def serialize_query_format(formater): - if formater == "ParquetDialect": - qq_format = QueryFormat( - type=QueryFormatType.PARQUET, - parquet_text_configuration=' ' - ) - elif isinstance(formater, DelimitedJsonDialect): - serialization_settings = JsonTextConfiguration( - record_separator=formater.delimiter - ) - qq_format = QueryFormat( - type=QueryFormatType.json, - json_text_configuration=serialization_settings) - elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well - try: - headers = formater.has_header - except AttributeError: - headers = False - serialization_settings = DelimitedTextConfiguration( - column_separator=formater.delimiter, - field_quote=formater.quotechar, - record_separator=formater.lineterminator, - escape_char=formater.escapechar, - headers_present=headers - ) - qq_format = QueryFormat( - type=QueryFormatType.delimited, - delimited_text_configuration=serialization_settings - ) - elif isinstance(formater, list): - serialization_settings = ArrowConfiguration( - schema=formater - ) - qq_format = QueryFormat( - type=QueryFormatType.arrow, - arrow_configuration=serialization_settings) - elif not formater: - return None - else: - raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect or ParquetDialect.") - return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/__init__.py deleted file mode 100644 index 5b396cd..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io.py deleted file mode 100644 index 93a5c13..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io.py +++ /dev/null @@ -1,464 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import json -import logging -import struct -import sys - -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -STRUCT_FLOAT = struct.Struct('= 0), n - input_bytes = self.reader.read(n) - if n > 0 and not input_bytes: - raise StopIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return self.read_long() - - def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - b = ord(self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(self.read(4))[0] - - def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(self.read(8))[0] - - def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = self.read_long() - assert (nbytes >= 0), nbytes - return self.read(nbytes) - - def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. 
- """ - input_bytes = self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - def skip_boolean(self): - self.skip(1) - - def skip_int(self): - self.skip_long() - - def skip_long(self): - b = ord(self.read(1)) - while (b & 0x80) != 0: - b = ord(self.read(1)) - - def skip_float(self): - self.skip(4) - - def skip_double(self): - self.skip(8) - - def skip_bytes(self): - self.skip(self.read_long()) - - def skip_utf8(self): - self.skip_bytes() - - def skip(self, n): - self.reader.seek(self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class DatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema". - """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - def read(self, decoder): - return self.read_data(self.writer_schema, decoder) - - def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = decoder.read_boolean() - elif writer_schema.type == 'string': - result = decoder.read_utf8() - elif writer_schema.type == 'int': - result = decoder.read_int() - elif writer_schema.type == 'long': - result = decoder.read_long() - elif writer_schema.type == 'float': - result = decoder.read_float() - elif writer_schema.type == 'double': - result = decoder.read_double() - elif writer_schema.type == 'bytes': - result = decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = decoder.skip_boolean() - elif writer_schema.type == 'string': - result = decoder.skip_utf8() - elif writer_schema.type == 'int': - result = decoder.skip_int() - elif writer_schema.type == 'long': - result = decoder.skip_long() - elif writer_schema.type == 'float': - result = decoder.skip_float() - elif writer_schema.type == 'double': - result = decoder.skip_double() - elif writer_schema.type == 'bytes': - result = decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - 
result = self.skip_enum(decoder) - elif writer_schema.type == 'array': - self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. - """ - return decoder.read(writer_schema.size) - - @staticmethod - def skip_fixed(writer_schema, decoder): - return decoder.skip(writer_schema.size) - - @staticmethod - def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - def skip_enum(decoder): - return decoder.skip_int() - - def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - read_items.append(self.read_data(writer_schema.items, decoder)) - block_count = decoder.read_long() - return read_items - - def skip_array(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - self.skip_data(writer_schema.items, decoder) - block_count = decoder.read_long() - - def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. 
- """ - read_items = {} - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - key = decoder.read_utf8() - read_items[key] = self.read_data(writer_schema.values, decoder) - block_count = decoder.read_long() - return read_items - - def skip_map(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - decoder.skip_utf8() - self.skip_data(writer_schema.values, decoder) - block_count = decoder.read_long() - - def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. - """ - # schema resolution - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return self.read_data(selected_writer_schema, decoder) - - def skip_union(self, writer_schema, decoder): - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. 
- """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io_async.py deleted file mode 100644 index e981216..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io_async.py +++ /dev/null @@ -1,448 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import logging -import sys - -from ..avro import schema - -from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Decoder - - -class AsyncBinaryDecoder(object): - """Read leaf values.""" - - def __init__(self, reader): - """ - reader is a Python object on which we can call read, seek, and tell. - """ - self._reader = reader - - @property - def reader(self): - """Reports the reader used by this decoder.""" - return self._reader - - async def read(self, n): - """Read n bytes. - - Args: - n: Number of bytes to read. - Returns: - The next n bytes from the input. - """ - assert (n >= 0), n - input_bytes = await self.reader.read(n) - if n > 0 and not input_bytes: - raise StopAsyncIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - async def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(await self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - async def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return await self.read_long() - - async def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. 
- """ - b = ord(await self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(await self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - async def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(await self.read(4))[0] - - async def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(await self.read(8))[0] - - async def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = await self.read_long() - assert (nbytes >= 0), nbytes - return await self.read(nbytes) - - async def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. - """ - input_bytes = await self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - async def skip_boolean(self): - await self.skip(1) - - async def skip_int(self): - await self.skip_long() - - async def skip_long(self): - b = ord(await self.read(1)) - while (b & 0x80) != 0: - b = ord(await self.read(1)) - - async def skip_float(self): - await self.skip(4) - - async def skip_double(self): - await self.skip(8) - - async def skip_bytes(self): - await self.skip(await self.read_long()) - - async def skip_utf8(self): - await self.skip_bytes() - - async def skip(self, n): - await self.reader.seek(await self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class AsyncDatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema", and the schema expected by the - reader the "reader's schema". 
- """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - async def read(self, decoder): - return await self.read_data(self.writer_schema, decoder) - - async def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = await decoder.read_boolean() - elif writer_schema.type == 'string': - result = await decoder.read_utf8() - elif writer_schema.type == 'int': - result = await decoder.read_int() - elif writer_schema.type == 'long': - result = await decoder.read_long() - elif writer_schema.type == 'float': - result = await decoder.read_float() - elif writer_schema.type == 'double': - result = await decoder.read_double() - elif writer_schema.type == 'bytes': - result = await decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = await self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = await self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = await self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = await self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = await self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - async def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = await decoder.skip_boolean() - elif writer_schema.type == 'string': - result = await decoder.skip_utf8() - elif writer_schema.type == 'int': - result = await decoder.skip_int() - elif writer_schema.type == 'long': - result = await decoder.skip_long() - elif writer_schema.type == 'float': - result = await decoder.skip_float() - elif writer_schema.type == 'double': - result = await decoder.skip_double() - elif writer_schema.type == 'bytes': - result = await decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = await self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.skip_enum(decoder) - elif writer_schema.type == 'array': - await self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - await self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = await self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - await self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - async def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. 
- """ - return await decoder.read(writer_schema.size) - - @staticmethod - async def skip_fixed(writer_schema, decoder): - return await decoder.skip(writer_schema.size) - - @staticmethod - async def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = await decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - async def skip_enum(decoder): - return await decoder.skip_int() - - async def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - read_items.append(await self.read_data(writer_schema.items, decoder)) - block_count = await decoder.read_long() - return read_items - - async def skip_array(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await self.skip_data(writer_schema.items, decoder) - block_count = await decoder.read_long() - - async def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = {} - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - key = await decoder.read_utf8() - read_items[key] = await self.read_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - return read_items - - async def skip_map(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await decoder.skip_utf8() - await self.skip_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - - async def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. 
- """ - # schema resolution - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return await self.read_data(selected_writer_schema, decoder) - - async def skip_union(self, writer_schema, decoder): - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - async def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. - """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = await self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - async def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - await self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile.py deleted file mode 100644 index df06fe0..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile.py +++ /dev/null @@ -1,266 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import io -import logging -import sys -import zlib - -from ..avro import avro_io -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Version of the container file: -VERSION = 1 - -if PY3: - MAGIC = b'Obj' + bytes([VERSION]) - MAGIC_SIZE = len(MAGIC) -else: - MAGIC = 'Obj' + chr(VERSION) - MAGIC_SIZE = len(MAGIC) - -# Size of the synchronization marker, in number of bytes: -SYNC_SIZE = 16 - -# Schema of the container header: -META_SCHEMA = schema.parse(""" -{ - "type": "record", "name": "org.apache.avro.file.Header", - "fields": [{ - "name": "magic", - "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} - }, { - "name": "meta", - "type": {"type": "map", "values": "bytes"} - }, { - "name": "sync", - "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} - }] -} -""" % { - 'magic_size': MAGIC_SIZE, - 'sync_size': SYNC_SIZE, -}) - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null', 'deflate']) - -# Metadata key associated to the schema: -SCHEMA_KEY = "avro.schema" - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class DataFileException(schema.AvroException): - """Problem reading or writing file object containers.""" - -# ------------------------------------------------------------------------------ - - -class DataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io.BinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - self._reader.seek(0, 0) - - # read the header: magic, meta, sync - self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' % self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - self._cur_object_index = 0 - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. 
- if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - - def __enter__(self): - return self - - def __exit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __iter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - header_reader.seek(0, 0) - - # read header into a dict - header = self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - def _read_block_header(self): - self._block_count = self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - elif self.codec == 'deflate': - # Compressed data is stored as (length, data), which - # corresponds to how the "bytes" type is encoded. - data = self.raw_decoder.read_bytes() - # -15 is the log of the window size; negative indicates - # "raw" (no zlib headers) decompression. See zlib.h. - uncompressed = zlib.decompress(data, -15) - self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopIteration - if proposed_sync_marker != self.sync_marker: - self.reader.seek(-SYNC_SIZE, 1) - - def __next__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. 
- if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - self._cur_object_index = 0 - - self._read_block_header() - - datum = self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - self.reader.track_object_position() - self.reader.set_object_index(0) - else: - self.reader.set_object_index(self._cur_object_index) - - return datum - - # PY2 - def next(self): - return self.__next__() - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile_async.py deleted file mode 100644 index 1e9d018..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile_async.py +++ /dev/null @@ -1,215 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import logging -import sys - -from ..avro import avro_io_async -from ..avro import schema -from .datafile import DataFileException -from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY - - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null']) - - -class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else \ - avro_io_async.AsyncBinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - self.codec = "null" - self._block_count = 0 - self._cur_object_index = 0 - self._meta = None - self._sync_marker = None - - async def init(self): - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. - await self._reader.seek(0, 0) - - # read the header: magic, meta, sync - await self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' 
% self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. - if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - return self - - async def __aenter__(self): - return self - - async def __aexit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __aiter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - async def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - await header_reader.seek(0, 0) - - # read header into a dict - header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - async def _read_block_header(self): - self._block_count = await self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - await self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - async def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = await self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopAsyncIteration - if proposed_sync_marker != self.sync_marker: - await self.reader.seek(-SYNC_SIZE, 1) - - async def __anext__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - await self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. 
- if hasattr(self._reader, 'object_position'): - await self.reader.track_object_position() - self._cur_object_index = 0 - - await self._read_block_header() - - datum = await self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - await self.reader.track_object_position() - await self.reader.set_object_index(0) - else: - await self.reader.set_object_index(self._cur_object_index) - - return datum - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/schema.py deleted file mode 100644 index ffe2853..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/schema.py +++ /dev/null @@ -1,1221 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines - -"""Representation of Avro schemas. - -A schema may be one of: - - A record, mapping field names to field value data; - - An error, equivalent to a record; - - An enum, containing one of a small set of symbols; - - An array of values, all of the same schema; - - A map containing string/value pairs, each of a declared schema; - - A union of other schemas; - - A fixed sized binary object; - - A unicode string; - - A sequence of bytes; - - A 32-bit signed int; - - A 64-bit signed long; - - A 32-bit floating-point float; - - A 64-bit floating-point double; - - A boolean; - - Null. -""" - -import abc -import json -import logging -import re -import sys -from six import with_metaclass - -PY2 = sys.version_info[0] == 2 - -if PY2: - _str = unicode # pylint: disable=undefined-variable -else: - _str = str - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Log level more verbose than DEBUG=10, INFO=20, etc. 
-DEBUG_VERBOSE = 5 - -NULL = 'null' -BOOLEAN = 'boolean' -STRING = 'string' -BYTES = 'bytes' -INT = 'int' -LONG = 'long' -FLOAT = 'float' -DOUBLE = 'double' -FIXED = 'fixed' -ENUM = 'enum' -RECORD = 'record' -ERROR = 'error' -ARRAY = 'array' -MAP = 'map' -UNION = 'union' - -# Request and error unions are part of Avro protocols: -REQUEST = 'request' -ERROR_UNION = 'error_union' - -PRIMITIVE_TYPES = frozenset([ - NULL, - BOOLEAN, - STRING, - BYTES, - INT, - LONG, - FLOAT, - DOUBLE, -]) - -NAMED_TYPES = frozenset([ - FIXED, - ENUM, - RECORD, - ERROR, -]) - -VALID_TYPES = frozenset.union( - PRIMITIVE_TYPES, - NAMED_TYPES, - [ - ARRAY, - MAP, - UNION, - REQUEST, - ERROR_UNION, - ], -) - -SCHEMA_RESERVED_PROPS = frozenset([ - 'type', - 'name', - 'namespace', - 'fields', # Record - 'items', # Array - 'size', # Fixed - 'symbols', # Enum - 'values', # Map - 'doc', -]) - -FIELD_RESERVED_PROPS = frozenset([ - 'default', - 'name', - 'doc', - 'order', - 'type', -]) - -VALID_FIELD_SORT_ORDERS = frozenset([ - 'ascending', - 'descending', - 'ignore', -]) - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class Error(Exception): - """Base class for errors in this module.""" - - -class AvroException(Error): - """Generic Avro schema error.""" - - -class SchemaParseException(AvroException): - """Error while parsing a JSON schema descriptor.""" - - -class Schema(with_metaclass(abc.ABCMeta, object)): - """Abstract base class for all Schema classes.""" - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object. - - Args: - data_type: Type of the schema to initialize. - other_props: Optional dictionary of additional properties. - """ - if data_type not in VALID_TYPES: - raise SchemaParseException('%r is not a valid Avro type.' % data_type) - - # All properties of this schema, as a map: property name -> property value - self._props = {} - - self._props['type'] = data_type - self._type = data_type - - if other_props: - self._props.update(other_props) - - @property - def namespace(self): - """Returns: the namespace this schema belongs to, if any, or None.""" - return self._props.get('namespace', None) - - @property - def type(self): - """Returns: the type of this schema.""" - return self._type - - @property - def doc(self): - """Returns: the documentation associated to this schema, if any, or None.""" - return self._props.get('doc', None) - - @property - def props(self): - """Reports all the properties of this schema. - - Includes all properties, reserved and non reserved. - JSON properties of this schema are directly generated from this dict. - - Returns: - A dictionary of properties associated to this schema. - """ - return self._props - - @property - def other_props(self): - """Returns: the dictionary of non-reserved properties.""" - return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) - - def __str__(self): - """Returns: the JSON representation of this schema.""" - return json.dumps(self.to_json(names=None)) - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - - -# ------------------------------------------------------------------------------ - - -_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') - -_RE_FULL_NAME = re.compile( - r'^' - r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace - r'([A-Za-z_][A-Za-z0-9_]*)' # name - r'$' -) - - -class Name(object): - """Representation of an Avro name.""" - - def __init__(self, name, namespace=None): - """Parses an Avro name. - - Args: - name: Avro name to parse (relative or absolute). - namespace: Optional explicit namespace if the name is relative. - """ - # Normalize: namespace is always defined as a string, possibly empty. - if namespace is None: - namespace = '' - - if '.' in name: - # name is absolute, namespace is ignored: - self._fullname = name - - match = _RE_FULL_NAME.match(self._fullname) - if match is None: - raise SchemaParseException( - 'Invalid absolute schema name: %r.' % self._fullname) - - self._name = match.group(1) - self._namespace = self._fullname[:-(len(self._name) + 1)] - - else: - # name is relative, combine with explicit namespace: - self._name = name - self._namespace = namespace - self._fullname = (self._name - if (not self._namespace) else - '%s.%s' % (self._namespace, self._name)) - - # Validate the fullname: - if _RE_FULL_NAME.match(self._fullname) is None: - raise SchemaParseException( - 'Invalid schema name %r infered from name %r and namespace %r.' - % (self._fullname, self._name, self._namespace)) - - def __eq__(self, other): - if not isinstance(other, Name): - return NotImplemented - return self.fullname == other.fullname - - @property - def simple_name(self): - """Returns: the simple name part of this name.""" - return self._name - - @property - def namespace(self): - """Returns: this name's namespace, possible the empty string.""" - return self._namespace - - @property - def fullname(self): - """Returns: the full name.""" - return self._fullname - - -# ------------------------------------------------------------------------------ - - -class Names(object): - """Tracks Avro named schemas and default namespace during parsing.""" - - def __init__(self, default_namespace=None, names=None): - """Initializes a new name tracker. - - Args: - default_namespace: Optional default namespace. - names: Optional initial mapping of known named schemas. - """ - if names is None: - names = {} - self._names = names - self._default_namespace = default_namespace - - @property - def names(self): - """Returns: the mapping of known named schemas.""" - return self._names - - @property - def default_namespace(self): - """Returns: the default namespace, if any, or None.""" - return self._default_namespace - - def new_with_default_namespace(self, namespace): - """Creates a new name tracker from this tracker, but with a new default ns. - - Args: - namespace: New default namespace to use. - Returns: - New name tracker with the specified default namespace. - """ - return Names(names=self._names, default_namespace=namespace) - - def get_name(self, name, namespace=None): - """Resolves the Avro name according to this name tracker's state. - - Args: - name: Name to resolve (absolute or relative). - namespace: Optional explicit namespace. - Returns: - The specified name, resolved according to this tracker. - """ - if namespace is None: - namespace = self._default_namespace - return Name(name=name, namespace=namespace) - - def get_schema(self, name, namespace=None): - """Resolves an Avro schema by name. 
- - Args: - name: Name (relative or absolute) of the Avro schema to look up. - namespace: Optional explicit namespace. - Returns: - The schema with the specified name, if any, or None. - """ - avro_name = self.get_name(name=name, namespace=namespace) - return self._names.get(avro_name.fullname, None) - - def prune_namespace(self, properties): - """given a properties, return properties with namespace removed if - it matches the own default namespace - """ - if self.default_namespace is None: - # I have no default -- no change - return properties - if 'namespace' not in properties: - # he has no namespace - no change - return properties - if properties['namespace'] != self.default_namespace: - # we're different - leave his stuff alone - return properties - # we each have a namespace and it's redundant. delete his. - prunable = properties.copy() - del prunable['namespace'] - return prunable - - def register(self, schema): - """Registers a new named schema in this tracker. - - Args: - schema: Named Avro schema to register in this tracker. - """ - if schema.fullname in VALID_TYPES: - raise SchemaParseException( - '%s is a reserved type name.' % schema.fullname) - if schema.fullname in self.names: - raise SchemaParseException( - 'Avro name %r already exists.' % schema.fullname) - - logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname) - self._names[schema.fullname] = schema - - -# ------------------------------------------------------------------------------ - - -class NamedSchema(Schema): - """Abstract base class for named schemas. - - Named schemas are enumerated in NAMED_TYPES. - """ - - def __init__( - self, - data_type, - name=None, - namespace=None, - names=None, - other_props=None, - ): - """Initializes a new named schema object. - - Args: - data_type: Type of the named schema. - name: Name (absolute or relative) of the schema. - namespace: Optional explicit namespace if name is relative. - names: Tracker to resolve and register Avro names. - other_props: Optional map of additional properties of the schema. - """ - assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type) - self._avro_name = names.get_name(name=name, namespace=namespace) - - super(NamedSchema, self).__init__(data_type, other_props) - - names.register(self) - - self._props['name'] = self.name - if self.namespace: - self._props['namespace'] = self.namespace - - @property - def avro_name(self): - """Returns: the Name object describing this schema's name.""" - return self._avro_name - - @property - def name(self): - return self._avro_name.simple_name - - @property - def namespace(self): - return self._avro_name.namespace - - @property - def fullname(self): - return self._avro_name.fullname - - def name_ref(self, names): - """Reports this schema name relative to the specified name tracker. - - Args: - names: Avro name tracker to relativise this schema name against. - Returns: - This schema name, relativised against the specified name tracker. - """ - if self.namespace == names.default_namespace: - return self.name - return self.fullname - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - -# ------------------------------------------------------------------------------ - - -_NO_DEFAULT = object() - - -class Field(object): - """Representation of the schema of a field in a record.""" - - def __init__( - self, - data_type, - name, - index, - has_default, - default=_NO_DEFAULT, - order=None, - doc=None, - other_props=None - ): - """Initializes a new Field object. - - Args: - data_type: Avro schema of the field. - name: Name of the field. - index: 0-based position of the field. - has_default: - default: - order: - doc: - other_props: - """ - if (not isinstance(name, _str)) or (not name): - raise SchemaParseException('Invalid record field name: %r.' % name) - if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): - raise SchemaParseException('Invalid record field order: %r.' % order) - - # All properties of this record field: - self._props = {} - - self._has_default = has_default - if other_props: - self._props.update(other_props) - - self._index = index - self._type = self._props['type'] = data_type - self._name = self._props['name'] = name - - if has_default: - self._props['default'] = default - - if order is not None: - self._props['order'] = order - - if doc is not None: - self._props['doc'] = doc - - @property - def type(self): - """Returns: the schema of this field.""" - return self._type - - @property - def name(self): - """Returns: this field name.""" - return self._name - - @property - def index(self): - """Returns: the 0-based index of this field in the record.""" - return self._index - - @property - def default(self): - return self._props['default'] - - @property - def has_default(self): - return self._has_default - - @property - def order(self): - return self._props.get('order', None) - - @property - def doc(self): - return self._props.get('doc', None) - - @property - def props(self): - return self._props - - @property - def other_props(self): - return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) - - def __str__(self): - return json.dumps(self.to_json()) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['type'] = self.type.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Primitive Types - - -class PrimitiveSchema(Schema): - """Schema of a primitive Avro type. - - Valid primitive types are defined in PRIMITIVE_TYPES. - """ - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object for the specified primitive type. - - Args: - data_type: Type of the schema to construct. Must be primitive. - """ - if data_type not in PRIMITIVE_TYPES: - raise AvroException('%r is not a valid primitive type.' % data_type) - super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) - - @property - def name(self): - """Returns: the simple name of this schema.""" - # The name of a primitive type is the type itself. - return self.type - - @property - def fullname(self): - """Returns: the fully qualified name of this schema.""" - # The full name is the simple name for primitive schema. 
- return self.name - - def to_json(self, names=None): - if len(self.props) == 1: - return self.fullname - return self.props - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (non-recursive) - - -class FixedSchema(NamedSchema): - def __init__( - self, - name, - namespace, - size, - names=None, - other_props=None, - ): - # Ensure valid ctor args - if not isinstance(size, int): - fail_msg = 'Fixed Schema requires a valid integer for size property.' - raise AvroException(fail_msg) - - super(FixedSchema, self).__init__( - data_type=FIXED, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - self._props['size'] = size - - @property - def size(self): - """Returns: the size of this fixed schema, in bytes.""" - return self._props['size'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ - - -class EnumSchema(NamedSchema): - def __init__( - self, - name, - namespace, - symbols, - names=None, - doc=None, - other_props=None, - ): - """Initializes a new enumeration schema object. - - Args: - name: Simple name of this enumeration. - namespace: Optional namespace. - symbols: Ordered list of symbols defined in this enumeration. - names: - doc: - other_props: - """ - symbols = tuple(symbols) - symbol_set = frozenset(symbols) - if (len(symbol_set) != len(symbols) - or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): - raise AvroException( - 'Invalid symbols for enum schema: %r.' % (symbols,)) - - super(EnumSchema, self).__init__( - data_type=ENUM, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - - self._props['symbols'] = symbols - if doc is not None: - self._props['doc'] = doc - - @property - def symbols(self): - """Returns: the symbols defined in this enum.""" - return self._props['symbols'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (recursive) - - -class ArraySchema(Schema): - """Schema of an array.""" - - def __init__(self, items, other_props=None): - """Initializes a new array schema object. - - Args: - items: Avro schema of the array items. 
- other_props: - """ - super(ArraySchema, self).__init__( - data_type=ARRAY, - other_props=other_props, - ) - self._items_schema = items - self._props['items'] = items - - @property - def items(self): - """Returns: the schema of the items in this array.""" - return self._items_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - item_schema = self.items - to_dump['items'] = item_schema.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class MapSchema(Schema): - """Schema of a map.""" - - def __init__(self, values, other_props=None): - """Initializes a new map schema object. - - Args: - values: Avro schema of the map values. - other_props: - """ - super(MapSchema, self).__init__( - data_type=MAP, - other_props=other_props, - ) - self._values_schema = values - self._props['values'] = values - - @property - def values(self): - """Returns: the schema of the values in this map.""" - return self._values_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['values'] = self.values.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class UnionSchema(Schema): - """Schema of a union.""" - - def __init__(self, schemas): - """Initializes a new union schema object. - - Args: - schemas: Ordered collection of schema branches in the union. - """ - super(UnionSchema, self).__init__(data_type=UNION) - self._schemas = tuple(schemas) - - # Validate the schema branches: - - # All named schema names are unique: - named_branches = tuple( - filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) - unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) - if len(unique_names) != len(named_branches): - raise AvroException( - 'Invalid union branches with duplicate schema name:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - # Types are unique within unnamed schemas, and union is not allowed: - unnamed_branches = tuple( - filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) - unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) - if UNION in unique_types: - raise AvroException( - 'Invalid union branches contain other unions:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - if len(unique_types) != len(unnamed_branches): - raise AvroException( - 'Invalid union branches with duplicate type:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - @property - def schemas(self): - """Returns: the ordered list of schema branches in the union.""" - return self._schemas - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - to_dump.append(schema.to_json(names)) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class ErrorUnionSchema(UnionSchema): - """Schema representing the declared errors of a protocol message.""" - - def __init__(self, schemas): - """Initializes an error-union 
schema. - - Args: - schema: collection of error schema. - """ - # Prepend "string" to handle system errors - schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas) - super(ErrorUnionSchema, self).__init__(schemas=schemas) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - # Don't print the system error schema - if schema.type == STRING: - continue - to_dump.append(schema.to_json(names)) - return to_dump - - -# ------------------------------------------------------------------------------ - - -class RecordSchema(NamedSchema): - """Schema of a record.""" - - @staticmethod - def _make_field(index, field_desc, names): - """Builds field schemas from a list of field JSON descriptors. - - Args: - index: 0-based index of the field in the record. - field_desc: JSON descriptors of a record field. - Return: - The field schema. - """ - field_schema = schema_from_json_data( - json_data=field_desc['type'], - names=names, - ) - other_props = ( - dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS))) - return Field( - data_type=field_schema, - name=field_desc['name'], - index=index, - has_default=('default' in field_desc), - default=field_desc.get('default', _NO_DEFAULT), - order=field_desc.get('order', None), - doc=field_desc.get('doc', None), - other_props=other_props, - ) - - @staticmethod - def make_field_list(field_desc_list, names): - """Builds field schemas from a list of field JSON descriptors. - - Guarantees field name unicity. - - Args: - field_desc_list: collection of field JSON descriptors. - names: Avro schema tracker. - Yields - Field schemas. - """ - for index, field_desc in enumerate(field_desc_list): - yield RecordSchema._make_field(index, field_desc, names) - - @staticmethod - def _make_field_map(fields): - """Builds the field map. - - Guarantees field name unicity. - - Args: - fields: iterable of field schema. - Returns: - A map of field schemas, indexed by name. - """ - field_map = {} - for field in fields: - if field.name in field_map: - raise SchemaParseException( - 'Duplicate record field name %r.' % field.name) - field_map[field.name] = field - return field_map - - def __init__( - self, - name, - namespace, - fields=None, - make_fields=None, - names=None, - record_type=RECORD, - doc=None, - other_props=None - ): - """Initializes a new record schema object. - - Args: - name: Name of the record (absolute or relative). - namespace: Optional namespace the record belongs to, if name is relative. - fields: collection of fields to add to this record. - Exactly one of fields or make_fields must be specified. - make_fields: function creating the fields that belong to the record. - The function signature is: make_fields(names) -> ordered field list. - Exactly one of fields or make_fields must be specified. - names: - record_type: Type of the record: one of RECORD, ERROR or REQUEST. - Protocol requests are not named. - doc: - other_props: - """ - if record_type == REQUEST: - # Protocol requests are not named: - super(RecordSchema, self).__init__( - data_type=REQUEST, - other_props=other_props, - ) - elif record_type in [RECORD, ERROR]: - # Register this record name in the tracker: - super(RecordSchema, self).__init__( - data_type=record_type, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - else: - raise SchemaParseException( - 'Invalid record type: %r.' 
% record_type) - - if record_type in [RECORD, ERROR]: - avro_name = names.get_name(name=name, namespace=namespace) - nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) - elif record_type == REQUEST: - # Protocol request has no name: no need to change default namespace: - nested_names = names - - if fields is None: - fields = make_fields(names=nested_names) - else: - assert make_fields is None - self._fields = tuple(fields) - - self._field_map = RecordSchema._make_field_map(self._fields) - - self._props['fields'] = fields - if doc is not None: - self._props['doc'] = doc - - @property - def fields(self): - """Returns: the field schemas, as an ordered tuple.""" - return self._fields - - @property - def field_map(self): - """Returns: a read-only map of the field schemas index by field names.""" - return self._field_map - - def to_json(self, names=None): - if names is None: - names = Names() - # Request records don't have names - if self.type == REQUEST: - return [f.to_json(names) for f in self.fields] - - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - - to_dump = names.prune_namespace(self.props.copy()) - to_dump['fields'] = [f.to_json(names) for f in self.fields] - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Module functions - - -def filter_keys_out(items, keys): - """Filters a collection of (key, value) items. - - Exclude any item whose key belongs to keys. - - Args: - items: Dictionary of items to filter the keys out of. - keys: Keys to filter out. - Yields: - Filtered items. - """ - for key, value in items.items(): - if key in keys: - continue - yield key, value - - -# ------------------------------------------------------------------------------ - - -def _schema_from_json_string(json_string, names): - if json_string in PRIMITIVE_TYPES: - return PrimitiveSchema(data_type=json_string) - - # Look for a known named schema: - schema = names.get_schema(name=json_string) - if schema is None: - raise SchemaParseException( - 'Unknown named schema %r, known names: %r.' 
- % (json_string, sorted(names.names))) - return schema - - -def _schema_from_json_array(json_array, names): - def MakeSchema(desc): - return schema_from_json_data(json_data=desc, names=names) - - return UnionSchema(map(MakeSchema, json_array)) - - -def _schema_from_json_object(json_object, names): - data_type = json_object.get('type') - if data_type is None: - raise SchemaParseException( - 'Avro schema JSON descriptor has no "type" property: %r' % json_object) - - other_props = dict( - filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) - - if data_type in PRIMITIVE_TYPES: - # FIXME should not ignore other properties - result = PrimitiveSchema(data_type, other_props=other_props) - - elif data_type in NAMED_TYPES: - name = json_object.get('name') - namespace = json_object.get('namespace', names.default_namespace) - if data_type == FIXED: - size = json_object.get('size') - result = FixedSchema(name, namespace, size, names, other_props) - elif data_type == ENUM: - symbols = json_object.get('symbols') - doc = json_object.get('doc') - result = EnumSchema(name, namespace, symbols, names, doc, other_props) - - elif data_type in [RECORD, ERROR]: - field_desc_list = json_object.get('fields', ()) - - def MakeFields(names): - return tuple(RecordSchema.make_field_list(field_desc_list, names)) - - result = RecordSchema( - name=name, - namespace=namespace, - make_fields=MakeFields, - names=names, - record_type=data_type, - doc=json_object.get('doc'), - other_props=other_props, - ) - else: - raise Exception('Internal error: unknown type %r.' % data_type) - - elif data_type in VALID_TYPES: - # Unnamed, non-primitive Avro type: - - if data_type == ARRAY: - items_desc = json_object.get('items') - if items_desc is None: - raise SchemaParseException( - 'Invalid array schema descriptor with no "items" : %r.' - % json_object) - result = ArraySchema( - items=schema_from_json_data(items_desc, names), - other_props=other_props, - ) - - elif data_type == MAP: - values_desc = json_object.get('values') - if values_desc is None: - raise SchemaParseException( - 'Invalid map schema descriptor with no "values" : %r.' - % json_object) - result = MapSchema( - values=schema_from_json_data(values_desc, names=names), - other_props=other_props, - ) - - elif data_type == ERROR_UNION: - error_desc_list = json_object.get('declared_errors') - assert error_desc_list is not None - error_schemas = map( - lambda desc: schema_from_json_data(desc, names=names), - error_desc_list) - result = ErrorUnionSchema(schemas=error_schemas) - - else: - raise Exception('Internal error: unknown type %r.' % data_type) - else: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r' % json_object) - return result - - -# Parsers for the JSON data types: -_JSONDataParserTypeMap = { - _str: _schema_from_json_string, - list: _schema_from_json_array, - dict: _schema_from_json_object, -} - - -def schema_from_json_data(json_data, names=None): - """Builds an Avro Schema from its JSON descriptor. - - Args: - json_data: JSON data representing the descriptor of the Avro schema. - names: Optional tracker for Avro named schemas. - Returns: - The Avro schema parsed from the JSON descriptor. - Raises: - SchemaParseException: if the descriptor is invalid. - """ - if names is None: - names = Names() - - # Select the appropriate parser based on the JSON data type: - parser = _JSONDataParserTypeMap.get(type(json_data)) - if parser is None: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) - return parser(json_data, names=names) - - -# ------------------------------------------------------------------------------ - - -def parse(json_string): - """Constructs a Schema from its JSON descriptor in text form. - - Args: - json_string: String representation of the JSON descriptor of the schema. - Returns: - The parsed schema. - Raises: - SchemaParseException: on JSON parsing error, - or if the JSON descriptor is invalid. - """ - try: - json_data = json.loads(json_string) - except Exception as exn: - raise SchemaParseException( - 'Error parsing schema from JSON: %r. ' - 'Error message: %r.' - % (json_string, exn)) - - # Initialize the names object - names = Names() - - # construct the Avro Schema object - return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client.py deleted file mode 100644 index a2efa21..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client.py +++ /dev/null @@ -1,460 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
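parse() above is the entry point of the removed schema module: it loads a JSON schema descriptor and hands it to schema_from_json_data. A small usage sketch, assuming the module were still importable at its old path and using an illustrative record name:

from azure.multiapi.storagev2.blob.v2020_10_02._shared.avro.schema import parse

record_json = '''
{"type": "record", "name": "BlobChangeEvent", "namespace": "com.example",
 "fields": [{"name": "subject", "type": "string"},
            {"name": "size",    "type": ["null", "long"], "default": null}]}
'''
schema = parse(record_json)                 # returns a RecordSchema
print(schema.fullname)                      # com.example.BlobChangeEvent
print([f.name for f in schema.fields])      # ['subject', 'size']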
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
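The constructor above takes the storage account name from the first host label and derives the secondary host by inserting "-secondary" after it. A standalone sketch of that derivation for a blob endpoint (account name is a placeholder):

from urllib.parse import urlparse

parsed = urlparse("https://myaccount.blob.core.windows.net/container")
# The first label of the netloc is the storage account name.
account = parsed.netloc.split(".blob.core.")[0]
# The secondary endpoint inserts "-secondary" after the account name.
secondary = parsed.netloc.replace(account, account + "-secondary")
print(account)     # myaccount
print(secondary)   # myaccount-secondary.blob.core.windows.net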
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client_async.py deleted file mode 100644 index 3e619c9..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client_async.py +++ /dev/null @@ -1,192 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. 
- It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/constants.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/constants.py deleted file mode 100644 index bdee829..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -from .._generated import AzureBlobStorage - - -X_MS_VERSION = AzureBlobStorage(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. 
-if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds - # the 80000 seconds was calculated with: - # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 80000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/encryption.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. 
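The 80000-second read timeout chosen above follows directly from the arithmetic spelled out in the comment: the largest block size divided by an assumed worst-case upload speed.

max_block_mb = 4000          # maximum block size, in MB
min_upload_kb_per_s = 50     # arbitrarily chosen minimum upload speed, KB/s
read_timeout_s = max_block_mb * 1000 // min_upload_kb_per_s
print(read_timeout_s)        # 80000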
- :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
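_generate_encryption_data_dict above produces the envelope metadata that later travels with the blob (and is read back from the x-ms-meta-encryptiondata value). Its shape, with placeholder values, looks roughly like this:

example_encryption_data = {
    "WrappedContentKey": {
        "KeyId": "my-kek-id",                      # from kek.get_kid()
        "EncryptedKey": "<base64 wrapped CEK>",    # from kek.wrap_key(cek)
        "Algorithm": "<kek wrap algorithm>",       # from kek.get_key_wrap_algorithm()
    },
    "EncryptionAgent": {
        "Protocol": "1.0",
        "EncryptionAlgorithm": "AES_CBC_256",
    },
    "ContentEncryptionIV": "<base64 16-byte IV>",
    "KeyWrappingMetadata": {"EncryptionLibrary": "Python <package version>"},
    "EncryptionMode": "FullBlob",                  # added by encrypt_blob / generate_blob_encryption_data
}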
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
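# Rough shape of the serialized envelope built here (illustrative; the
# placeholder values are not literal, and the protocol and algorithm strings
# come from _ENCRYPTION_PROTOCOL_V1 and _EncryptionAlgorithm.AES_CBC_256):
#
#   {
#       "EncryptedMessageContents": "<base64 AES-CBC ciphertext>",
#       "EncryptionData": {
#           "WrappedContentKey": {
#               "KeyId": "<kek.get_kid()>",
#               "EncryptedKey": "<base64 wrapped CEK>",
#               "Algorithm": "<kek.get_key_wrap_algorithm()>"
#           },
#           "EncryptionAgent": {
#               "Protocol": "<_ENCRYPTION_PROTOCOL_V1>",
#               "EncryptionAlgorithm": "<AES_CBC_256>"
#           },
#           "ContentEncryptionIV": "<base64 16-byte IV>",
#           "KeyWrappingMetadata": {"EncryptionLibrary": "Python <VERSION>"}
#       }
#   }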
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/models.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/models.py deleted file mode 100644 index 6f6052a..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/models.py +++ /dev/null @@ -1,473 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. 
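# Usage sketch (illustrative, relying only on the surrounding class): the two
# spellings below describe the same account-SAS permission set, and
# from_string() simply maps each letter back onto the matching keyword argument.
perms_a = AccountSasPermissions(read=True, write=True, list=True)
perms_b = AccountSasPermissions.from_string("rwl")
assert str(perms_a) == str(perms_b) == "rwl"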
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') + - ('i' if self.set_immutability_policy else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.blob.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - p_set_immutability_policy = 'i' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. 
- :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/parser.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies.py deleted file mode 100644 index 11fc984..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies.py +++ /dev/null @@ -1,608 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. 
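# Worked example with the defaults above (before jitter): initial_backoff=15
# and increment_base=3 give a base delay of 15s for retry count 0,
# 15 + 3**1 = 18s for count 1, and 15 + 3**2 = 24s for count 2; the value
# actually returned is drawn uniformly from that delay plus or minus
# random_jitter_range seconds (floored at 0).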
- - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
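Ignoring jitter, the growth described in the docstring above is easy to sanity-check; the helper below simply restates ``get_backoff_time`` without the random component::

    def exponential_base(initial_backoff=15, increment_base=3, count=0):
        return initial_backoff + (0 if count == 0 else increment_base ** count)

    # First three retries: 15s, 15 + 3**1 = 18s, 15 + 3**2 = 24s, as documented above.
    assert [exponential_base(count=c) for c in range(3)] == [15, 18, 24]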
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/request_handlers.py deleted file mode 100644 index 0d3a2a5..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/request_handlers.py +++ /dev/null @@ -1,278 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -import stat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
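A quick illustration of ``serialize_iso``: it truncates to whole seconds and appends a literal ``Z`` (the sample value is invented)::

    from datetime import datetime, timezone

    dt = datetime(2021, 8, 6, 12, 30, 5, 123456, tzinfo=timezone.utc)
    assert serialize_iso(dt) == '2021-08-06T12:30:05Z'   # microseconds are dropped
    assert serialize_iso(None) is None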
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - mode = fstat(fileno).st_mode - if stat.S_ISREG(mode) or stat.S_ISLNK(mode): - #st_size only meaningful if regular file or symlink, other types - # e.g. sockets may return misleading sizes like 0 - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
<header key>: <header value>
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/response_handlers.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/response_handlers.py deleted file mode 100644 index e5a3514..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/response_handlers.py +++ /dev/null @@ -1,191 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
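The batch helpers in the removed ``request_handlers`` module frame each sub-request between ``--batch_<id>`` delimiters, give it ``Content-Type: application/http`` and a sequential ``Content-ID``, and close the body with ``--batch_<id>--`` plus a trailing CRLF. Roughly, a one-request batch body looks like this (the batch id, verb and URL are invented for illustration)::

    --batch_6f1f3e3a-5f3b-44bd-9f17-0c1f39a1a7f1
    Content-Type: application/http
    Content-ID: 0
    Content-Transfer-Encoding: binary

    DELETE /mycontainer/blob0 HTTP/1.1
    Content-Length: 0

    --batch_6f1f3e3a-5f3b-44bd-9f17-0c1f39a1a7f1--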
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. 
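Quick sanity checks for the small response parsers above (inputs are made up, and the helpers are assumed importable from this now-removed module)::

    # Total length is the value after the '/' in a Content-Range header.
    assert parse_length_from_content_range('bytes 1-3/65537') == 65537

    # 'x-ms-' prefixes are stripped and dashes become underscores.
    assert normalize_headers({'x-ms-lease-state': 'available'}) == {'lease_state': 'available'}

    # Only 'x-ms-meta-*' headers survive metadata deserialization (prefix removed),
    # e.g. {'x-ms-meta-owner': 'alice', 'ETag': '"0x1"'} -> {'owner': 'alice'}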
- if isinstance(storage_error, (PartialBatchErrorException, - ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type % from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error - if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error - - -def 
parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # 
for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads.py deleted file mode 100644 index 941a90f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads.py +++ /dev/null @@ -1,603 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
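Tying the SAS pieces above together: ``generate_account`` builds a newline-joined string-to-sign from the query parameters, HMAC-signs it with the account key, and ``get_token`` URL-encodes everything into a single query string. A hedged sketch (the account name and base64 key are placeholders)::

    sas = SharedAccessSignature('myaccount', 'a2V5LXBsYWNlaG9sZGVy')   # placeholder base64 key
    token = sas.generate_account(
        services='b',           # blob service only
        resource_types='sco',   # service, container and object level
        permission='rl',        # read + list
        expiry='2021-08-07T00:00:00Z')
    # token is a query string along the lines of
    # 'se=2021-08-07T00%3A00%3A00Z&sp=rl&sv=...&ss=b&srt=sco&sig=...'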
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in 
islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def 
_upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
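For block uploads, ``get_substream_blocks`` above slices the source stream into ``(offset, length)`` windows and wraps each one in the ``SubStream`` defined here, so several workers can read disjoint ranges of a single underlying stream. The windowing itself is just this arithmetic::

    from math import ceil

    total, chunk = 10 * 1024 * 1024, 4 * 1024 * 1024        # 10 MiB stream, 4 MiB blocks
    blocks = int(ceil(total / (chunk * 1.0)))                # 3 blocks
    last = chunk if total % chunk == 0 else total % chunk    # final block is 2 MiB
    windows = [(i * chunk, last if i == blocks - 1 else chunk) for i in range(blocks)]
    assert windows == [(0, 4194304), (4194304, 4194304), (8388608, 2097152)]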
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. 
- if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - # This means count < size and what's leftover will be returned in this call. - except StopIteration: - self.leftover = b"" - - if count >= size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
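The synchronous upload helpers above keep at most ``max_concurrency`` chunk uploads in flight: the executor is seeded with the first N chunks, and each completed future triggers exactly one new submission. A stripped-down sketch of that pattern with a dummy worker (all names invented); the asynchronous module that follows applies the same idea with ``asyncio.wait``::

    from concurrent import futures
    from itertools import islice

    def upload_chunk(chunk):                 # stand-in for uploader.process_chunk
        return chunk

    def bounded_parallel(chunks, max_concurrency=4):
        pending, results = iter(chunks), []
        with futures.ThreadPoolExecutor(max_concurrency) as executor:
            running = {executor.submit(upload_chunk, c)
                       for c in islice(pending, max_concurrency)}
            while running:
                done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
                results.extend(f.result() for f in done)
                for _ in done:               # refill: one new task per completed one
                    try:
                        running.add(executor.submit(upload_chunk, next(pending)))
                    except StopIteration:
                        break
        return results

    assert sorted(bounded_parallel(range(10))) == list(range(10))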
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared_access_signature.py deleted file mode 100644 index 890ef1b..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_shared_access_signature.py +++ /dev/null @@ -1,596 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from ._shared import sign_string, url_quote -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ - QueryStringConstants - -if TYPE_CHECKING: - from datetime import datetime - from ..blob import ( - ResourceTypes, - AccountSasPermissions, - UserDelegationKey, - ContainerSasPermissions, - BlobSasPermissions - ) - - -class BlobQueryStringConstants(object): - SIGNED_TIMESTAMP = 'snapshot' - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key=None, user_delegation_key=None): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. 
- :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: - Instead of an account key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling get_user_delegation_key on any Blob service object. - ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - self.user_delegation_key = user_delegation_key - - def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, - expiry=None, start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the blob or one of its snapshots. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to grant permission. - :param BlobSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. 
- :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = container_name + '/' + blob_name - - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - - resource = 'bs' if snapshot else 'b' - resource = 'bv' if version_id else resource - resource = 'd' if kwargs.pop("is_directory", None) else resource - sas.add_resource(resource) - - sas.add_timestamp(snapshot or version_id) - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, resource_path, - user_delegation_key=self.user_delegation_key) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param ContainerSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. 
- :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, container_name, - user_delegation_key=self.user_delegation_key) - return sas.get_token() - - -class _BlobSharedAccessHelper(_SharedAccessHelper): - - def add_timestamp(self, timestamp): - self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) - - def add_info_for_hns_account(self, **kwargs): - self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) - self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) - - def get_value_to_append(self, query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): - # pylint: disable = no-member - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/blob/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource) - - if user_delegation_key is not None: - self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) - self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) - self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) - self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) - self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) - self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_TID) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) - else: - string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + - self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + - self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + - self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key if user_delegation_key is None else user_delegation_key.value, - string_to_sign)) - - def get_token(self): - # a conscious decision was made to exclude the timestamp in the generated token - # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp - exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] - return '&'.join(['{0}={1}'.format(n, url_quote(v)) - for n, v in self.query_dict.items() if v is not None and n not in exclude]) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the blob service. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. 
- - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.blob.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_sas_token] - :end-before: [END create_sas_token] - :language: python - :dedent: 8 - :caption: Generating a shared access signature. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(blob=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_container_sas( - account_name, # type: str - container_name, # type: str - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[ContainerSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a container. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. 
- :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 12 - :caption: Generating a sas token. - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_container( - container_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_blob_sas( - account_name, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[BlobSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a blob. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str blob_name: - The name of the blob. - :param str snapshot: - An optional blob snapshot ID. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.BlobSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. 
- :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str version_id: - An optional blob version ID. This parameter is only for versioning enabled account - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - version_id = kwargs.pop('version_id', None) - if version_id and snapshot: - raise ValueError("snapshot and version_id cannot be set at the same time.") - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_blob( - container_name, - blob_name, - snapshot=snapshot, - version_id=version_id, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_10_02/_upload_helpers.py deleted file mode 100644 index 30d5bfa..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_upload_helpers.py +++ /dev/null @@ -1,306 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceExistsError, ResourceModifiedError, HttpResponseError - -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from ._shared.models import StorageErrorCode -from ._shared.uploads import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from ._shared.encryption import generate_blob_encryption_data, encrypt_blob -from ._generated.models import ( - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -def _convert_mod_error(error): - message = error.message.replace( - "The condition specified using HTTP conditional header(s) is not met.", - "The specified blob already exists.") - message = message.replace("ConditionNotMet", "BlobAlreadyExists") - overwrite_error = ResourceExistsError( - message=message, - response=error.response, - error=error) - overwrite_error.error_code = StorageErrorCode.blob_already_exists - raise overwrite_error - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - immutability_policy = kwargs.pop('immutability_policy', None) - immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time - immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode - legal_hold = kwargs.pop('legal_hold', None) - - # Do single put if the size is smaller than or equal config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return client.upload( - body=data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - 
cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - immutability_policy_expiry=immutability_policy_expiry, - immutability_policy_mode=immutability_policy_mode, - legal_hold=legal_hold, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs - ) - else: - block_ids = upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - headers=headers, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - immutability_policy_expiry=immutability_policy_expiry, - immutability_policy_mode=immutability_policy_mode, - legal_hold=legal_hold, - **kwargs) - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs) - - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_version.py b/azure/multiapi/storagev2/blob/v2020_10_02/_version.py deleted file mode 100644 index 68dc953..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/_version.py +++ /dev/null @@ -1,7 
+0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.9.0" diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/__init__.py deleted file mode 100644 index 33c1031..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/__init__.py +++ /dev/null @@ -1,141 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os - -from .._models import BlobType -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._blob_client_async import BlobClient -from ._container_client_async import ContainerClient -from ._blob_service_client_async import BlobServiceClient -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -async def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -async def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = await client.download_blob(**kwargs) - await stream.readinto(handle) - - -async def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - await _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - await _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_client_async.py deleted file mode 100644 index 97b412e..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_client_async.py +++ /dev/null @@ -1,2617 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method -from functools import partial -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -from azure.core.pipeline import AsyncPipeline - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import get_page_ranges_result, parse_tags, deserialize_pipeline_response_into_cls -from .._serialize import get_modify_conditions, get_api_version, get_access_conditions -from .._generated.aio import AzureBlobStorage -from .._generated.models import CpkInfo -from .._deserialize import deserialize_blob_properties -from .._blob_client import BlobClient as BlobClientBase -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from .._models import BlobType, BlobBlock, BlobProperties -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - - -class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. 
This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobClient, self).__init__( - account_url, - container_name=container_name, - blob_name=blob_name, - snapshot=snapshot, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @distributed_trace_async - async def get_account_information(self, **kwargs): # type: ignore - # type: (Optional[int]) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. 
- - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Dict[str, Any] - """ - Creates a new Block Blob where the content of the blob is read from a given URL. - The content of an existing blob is overwritten with the new blob. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. - :keyword bool include_source_blob_properties: - Indicates if properties from the source blob should be copied. Defaults to True. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :paramtype tags: dict(str, str) - :keyword bytearray source_content_md5: - Specify the md5 that is used to verify the integrity of the source bytes. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._upload_blob_from_url_options( - source_url=self._encode_source_url(source_url), - **kwargs) - try: - return await self._client.block_blob.put_blob_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob( - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. 
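# A hedged sketch of upload_blob_from_url(), assuming the public azure.storage.blob.aio API.
# The source URL below is a placeholder and must be publicly readable or carry its own SAS,
# as the docstring above requires.
import asyncio
from azure.storage.blob.aio import BlobClient

async def copy_into_block_blob():
    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="copied.txt")
    async with dest:
        # Server-side read from the source URL; overwrite any existing destination content.
        result = await dest.upload_blob_from_url(
            "https://<other-account>.blob.core.windows.net/src/source.txt?<sas>",
            overwrite=True)
        print(result.get("etag"))

asyncio.run(copy_into_block_blob())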
This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. - Required if the blob has an active lease. - :paramtype: ~azure.storage.blob.aio.BlobLeaseClient - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 16 - :caption: Upload a blob to the container. - """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return await upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return await upload_page_blob(**options) - return await upload_append_blob(**options) - - @distributed_trace_async - async def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. 
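# A minimal upload_blob() sketch for the block blob path, assuming the public
# azure.storage.blob.aio API; names and credentials are placeholders.
import asyncio
from azure.storage.blob.aio import BlobClient

async def upload_example():
    blob_client = BlobClient(
        account_url="https://<my-account>.blob.core.windows.net",
        container_name="mycontainer",
        blob_name="hello.txt",
        credential="<sas-token>")
    async with blob_client:
        # blob_type defaults to BlockBlob; overwrite=True replaces any existing blob.
        props = await blob_client.upload_blob(
            b"hello, world", overwrite=True, metadata={"origin": "example"})
        print(props["etag"], props["last_modified"])

asyncio.run(upload_example())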
Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. 
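# A hedged download_blob() sketch showing both readall() and the async chunks() iterator
# mentioned above; assumes the public azure.storage.blob.aio API and a placeholder SAS URL.
import asyncio
from azure.storage.blob.aio import BlobClient

async def download_example():
    blob_client = BlobClient.from_blob_url(
        "https://<my-account>.blob.core.windows.net/mycontainer/hello.txt?<sas>")
    async with blob_client:
        downloader = await blob_client.download_blob()
        data = await downloader.readall()          # read the whole blob into memory
        print(len(data))

        downloader = await blob_client.download_blob()
        async for chunk in downloader.chunks():    # or stream it chunk by chunk
            print(len(chunk))

asyncio.run(download_example())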
- :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 16 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - downloader = StorageStreamDownloader(**options) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_blob(self, delete_snapshots=None, **kwargs): - # type: (str, Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 16 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - await self._client.blob.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_blob(self, **kwargs): - # type: (Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 12 - :caption: Undeleting a blob. - """ - try: - await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :kwarg str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - await self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - # Encrypted with CPK - except ResourceExistsError: - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def get_blob_properties(self, **kwargs): - # type: (Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
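# A short sketch combining exists() and delete_blob() as documented above; assumes the
# public azure.storage.blob.aio API, with placeholder connection details.
import asyncio
from azure.storage.blob.aio import BlobClient

async def delete_if_present():
    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt")
    async with blob_client:
        if await blob_client.exists():
            # "include" also removes any snapshots, which must be deleted before
            # (or together with) the base blob.
            await blob_client.delete_blob(delete_snapshots="include")

asyncio.run(delete_if_present())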
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 12 - :caption: Getting the properties for a blob. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - cls_method = kwargs.pop('cls', None) - if cls_method: - kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) - blob_props = await self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
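# A hedged get_blob_properties() sketch; assumes the public azure.storage.blob.aio API
# and placeholder connection details.
import asyncio
from azure.storage.blob.aio import BlobClient

async def show_properties():
    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt")
    async with blob_client:
        props = await blob_client.get_blob_properties()
        # BlobProperties exposes system properties and user metadata, not the content.
        print(props.name, props.size, props.blob_type)
        print(props.content_settings.content_type, props.metadata)

asyncio.run(show_properties())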
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return await self._client.blob.set_http_headers(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.set_metadata(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_immutability_policy(self, immutability_policy, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Set Immutability Policy operation sets the immutability policy on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - return await self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs) - - @distributed_trace_async() - async def delete_immutability_policy(self, **kwargs): - # type: (**Any) -> None - """The Delete Immutability Policy operation deletes the immutability policy on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - - await self._client.blob.delete_immutability_policy(**kwargs) - - @distributed_trace_async - async def set_legal_hold(self, legal_hold, **kwargs): - # type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]] - """The Set Legal Hold operation sets a legal hold on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :param bool legal_hold: - Specified if a legal hold should be set on the blob. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, Union[str, datetime, bool]] - """ - - return await self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs) - - @distributed_trace_async - async def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. 
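# A sketch of set_http_headers() and set_blob_metadata(), assuming the public
# azure.storage.blob API (ContentSettings comes from the non-async namespace in the
# v12 layout); connection details are placeholders.
import asyncio
from azure.storage.blob import ContentSettings
from azure.storage.blob.aio import BlobClient

async def update_blob_properties():
    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt")
    async with blob_client:
        # All content settings are replaced as a unit, per the docstring above.
        await blob_client.set_http_headers(content_settings=ContentSettings(
            content_type="text/plain", cache_control="max-age=3600"))
        # Each call replaces the blob's full metadata set.
        await blob_client.set_blob_metadata({"reviewed": "true"})

asyncio.run(update_blob_properties())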
- :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return await self._client.page_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
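# A hedged sketch of create_page_blob() and create_append_blob(); sizes and names are
# illustrative, and the import path assumes the public azure.storage.blob.aio package.
import asyncio
from azure.storage.blob.aio import BlobClient

async def create_empty_blobs():
    page_blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="disk.vhd")
    append_blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="events.log")
    async with page_blob, append_blob:
        # Page blob size must be a multiple of 512 bytes (1 MiB here).
        await page_blob.create_page_blob(size=1024 * 1024)
        await append_blob.create_append_blob()

asyncio.run(create_empty_blobs())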
- :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.append_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. 
- As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 12 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.create_snapshot(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, Any) -> Any - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
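# A create_snapshot() sketch; the returned dict's 'snapshot' value can be passed back as the
# snapshot argument of a new BlobClient, as the constructor docstring notes. Assumes the
# public azure.storage.blob.aio API with placeholder connection details.
import asyncio
from azure.storage.blob.aio import BlobClient

async def snapshot_blob():
    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt")
    async with blob_client:
        snap = await blob_client.create_snapshot()
        print(snap["snapshot"])  # DateTime-style snapshot ID

    # A client scoped to that snapshot (a read-only view of the blob at that instant).
    snap_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt",
        snapshot=snap)
    async with snap_client:
        print(await snap_client.exists())

asyncio.run(snapshot_blob())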
- Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. This option is only available when `incremental_copy` is - set to False and `requires_sync` is set to True. - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, Union[str, ~datetime.datetime]] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Copy a blob from a URL. 
- """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return await self._client.page_blob.copy_incremental(**options) - return await self._client.blob.start_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - await self._client.blob.abort_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. 
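# A hedged copy-workflow sketch (start, poll, optionally abort), assuming the public
# azure.storage.blob.aio API; URLs and connection strings are placeholders.
import asyncio
from azure.storage.blob.aio import BlobClient

async def copy_blob():
    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="copied.txt")
    async with dest:
        copy = await dest.start_copy_from_url(
            "https://<other-account>.blob.core.windows.net/src/source.txt?<sas>")
        print(copy["copy_id"], copy["copy_status"])

        # Poll the copy status via the destination blob's properties.
        props = await dest.get_blob_properties()
        if props.copy.status == "pending":
            # Aborting leaves a zero-length destination blob with full metadata.
            await dest.abort_copy(props.copy.id)

asyncio.run(copy_blob())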
- :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=standard_blob_tier, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
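# A sketch of acquire_lease() and set_standard_blob_tier(), assuming the public
# azure.storage.blob.aio API; 15 seconds is the minimum non-infinite lease duration.
import asyncio
from azure.storage.blob.aio import BlobClient

async def lease_and_retier():
    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt")
    async with blob_client:
        lease = await blob_client.acquire_lease(lease_duration=15)
        try:
            # Leased blobs require the lease to be passed to mutating calls.
            await blob_client.set_standard_blob_tier("Cool", lease=lease)
        finally:
            await lease.release()

asyncio.run(lease_and_retier())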
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return await self._client.block_blob.stage_block(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block_from_url( - self, block_id, # type: Union[str, int] - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - :rtype: None - """ - options = self._stage_block_from_url_options( - block_id, - source_url=self._encode_source_url(source_url), - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return await self._client.block_blob.stage_block_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = await self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - @distributed_trace_async - async def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of Blockblobs. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease.
Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.block_blob.commit_block_list(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. - Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob.
- :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return await self._client.blob.set_tags(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = await self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = await self._client.page_blob.get_page_ranges(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def set_sequence_number( # type: ignore - self, sequence_number_action, # type: Union[str, SequenceNumberAction] - sequence_number=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return await self._client.page_blob.update_sequence_number(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_blob(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return await self._client.page_blob.resize(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) 
-> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. 
If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return await self._client.page_blob.upload_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. 
- :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def clear_page(self, offset, length, **kwargs): - # type: (int, int, Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. 
- :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return await self._client.page_blob.clear_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. 
If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return await self._client.append_blob.append_block(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async() - async def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) 
-> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return await self._client.append_blob.append_block_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async() - async def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return await self._client.append_blob.seal(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _get_container_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> ContainerClient - """Get a client to interact with the blob's parent container. - - The container need not already exist. Defaults to current blob's credentials. - - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_client_from_blob_client] - :end-before: [END get_container_client_from_blob_client] - :language: python - :dedent: 12 - :caption: Get container client from blob object. - """ - from ._container_client_async import ContainerClient - if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - _pipeline=_pipeline, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_service_client_async.py deleted file mode 100644 index 9cb1563..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_service_client_async.py +++ /dev/null @@ -1,676 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -import warnings -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged - -from .._shared.models import LocationMode -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._shared.parser import _to_utc_datetime -from .._shared.response_handlers import parse_to_internal_user_delegation_key -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageServiceProperties, KeyInfo -from .._blob_service_client import BlobServiceClient as BlobServiceClientBase -from ._container_client_async import ContainerClient -from ._blob_client_async import BlobClient -from .._models import ContainerProperties -from .._deserialize import service_stats_deserialize, service_properties_deserialize -from .._serialize import get_api_version -from ._models import ContainerPropertiesPaged, FilteredBlobPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey - from ._lease_async import BlobLeaseClient - from .._models import ( - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. 
- :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @distributed_trace_async - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. 
- The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 12 - :caption: Getting account information for the blob service. - """ - try: - return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_stats(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 12 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 12 - :caption: Getting service properties for the blob service. 
- """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 12 - :caption: Setting service properties for the blob service. 
- """ - if all(parameter is None for parameter in [ - analytics_logging, hour_metrics, minute_metrics, cors, - target_version, delete_retention_policy, static_website]): - raise ValueError("set_service_properties should be called with at least one parameter") - - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> AsyncItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 16 - :caption: Listing the containers in the blob service. - """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. 
"@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace_async - async def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 16 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - timeout = kwargs.pop('timeout', None) - kwargs.setdefault('merge_span', True) - await container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace_async - async def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 16 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def _rename_container(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str name: - The name of the container to rename. - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - renamed_container = self.get_container_client(new_name) - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - await renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.aio.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - if new_name: - warnings.warn("`new_name` is no longer supported.", DeprecationWarning) - container = self.get_container_client(new_name or deleted_container_name) - try: - await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except HttpResponseError as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 12 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by - :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 16 - :caption: Getting the blob client to interact with a specific blob. 
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_container_client_async.py deleted file mode 100644 index 2f73b9c..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_container_client_async.py +++ /dev/null @@ -1,1209 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.pipeline.transport import AsyncHttpResponse - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from .._generated.aio import AzureBlobStorage -from .._generated.models import SignedIdentifier -from .._deserialize import deserialize_container_properties -from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name -from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import -from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix -from ._lease_async import BlobLeaseClient -from ._blob_client_async import BlobClient - -if TYPE_CHECKING: - from .._models import PublicAccess - from ._download_async import StorageStreamDownloader - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - AccessPolicy, - StandardBlobTier, - PremiumPageBlobTier) - - -class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): - """A client to interact with a specific container, although that container - may not yet exist. 
- - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 12 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(ContainerClient, self).__init__( - account_url, - container_name=container_name, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @distributed_trace_async - async def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 16 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - timeout = kwargs.pop('timeout', None) - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return await self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def _rename_container(self, new_name, **kwargs): - # type: (str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.ContainerClient - """ - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container = ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 16 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - await self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. 
- - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_properties(self, **kwargs): - # type: (**Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. 
- :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a container exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - await self._client.container.get_properties(**kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. 
- """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> BlobServiceClient - """Get a client to interact with the container's parent service account. - - Defaults to current container's credentials. - - :returns: A BlobServiceClient. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_client_from_container_client] - :end-before: [END get_blob_service_client_from_container_client] - :language: python - :dedent: 8 - :caption: Get blob service client from container object. - """ - from ._blob_service_client_async import BlobServiceClient - if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return BlobServiceClient( - "{}://{}".format(self.scheme, self.primary_hostname), - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, - _pipeline=_pipeline) - - - @distributed_trace_async - async def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 16 - :caption: Getting the access policy on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs # type: Any - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 16 - :caption: Setting access policy on the container. - """ - timeout = kwargs.pop('timeout', None) - lease = kwargs.pop('lease', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - try: - return await self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', - 'tags', 'versions'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 12 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged - ) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace_async - async def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 12 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - await blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace_async - async def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a Lease object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await blob.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. 
versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object. (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return await blob_client.download_blob( - offset=offset, - length=length, - **kwargs) - - @distributed_trace_async - async def delete_blobs( # pylint: disable=arguments-differ - self, *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 12 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_standard_blob_tier_blobs( - self, - standard_blob_tier: Union[str, 'StandardBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set on all blobs to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. 
- :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[BlobProperties, str] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 12 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_download_async.py deleted file mode 100644 index 135fd66..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_download_async.py +++ /dev/null @@ -1,547 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings -from typing import AsyncIterator - -from aiohttp import ClientPayloadError -from azure.core.exceptions import HttpResponseError, ServiceResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._deserialize import get_page_ranges_result -from .._download import process_range_and_offset, _ChunkDownloader - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - content = data.response.body() - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - retry_active = True - retry_total = 3 - while retry_active: - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - retry_active = False - - except HttpResponseError as error: - process_storage_error(error) - except ClientPayloadError as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - await asyncio.sleep(1) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = response.properties.etag - - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += await self._iter_downloader.yield_chunk(chunk) - except StopIteration: - self._complete = True - # it's likely that there some data left in self._current_content - if self._current_content: - return self._current_content - raise StopAsyncIteration("Download complete") - - return self._get_chunk_data() - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the blob. 
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - retry_active = True - retry_total = 3 - while retry_active: - try: - location_mode, response = await self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - retry_active = False - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - retry_active = False - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - except ClientPayloadError as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - await asyncio.sleep(1) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = await self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - if self._request_options.get('modified_access_conditions'): - self._request_options['modified_access_conditions'].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob_in_chunk] - :end-before: [END download_a_blob_in_chunk] - :language: python - :dedent: 16 - :caption: Download a blob using chunks(). - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - async def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. 
- :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :param int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - for task in done: - task.result() - except HttpResponseError as error: - process_storage_error(error) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - done, _running_futures = await asyncio.wait(running_futures) - 
try: - for task in done: - task.result() - except HttpResponseError as error: - process_storage_error(error) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :param int max_concurrency: - The number of parallel connections with which to download. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_lease_async.py deleted file mode 100644 index 79e6733..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_lease_async.py +++ /dev/null @@ -1,325 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._serialize import get_modify_conditions -from .._lease import BlobLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - from .._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(LeaseClientBase): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.aio.BlobClient or - ~azure.storage.blob.aio.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Any) -> None - """Requests a new lease. 
- - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. 
The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_list_blobs_helper.py deleted file mode 100644 index 058572f..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_list_blobs_helper.py +++ /dev/null @@ -1,163 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from .._deserialize import get_blob_properties_from_generated_code -from .._models import BlobProperties -from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The container that the blobs are listed from. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefix(AsyncItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token of the current page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. 
- :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - async def _extract_data_cb(self, get_next_return): - continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - return BlobPrefix( - self._command, - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_models.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_models.py deleted file mode 100644 index 05edd78..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_models.py +++ /dev/null @@ -1,143 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator -from azure.core.exceptions import HttpResponseError -from .._deserialize import parse_tags - -from .._models import ContainerProperties, FilteredBlob -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - -from .._generated.models import FilterBlobItem - - -class ContainerPropertiesPaged(AsyncPageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. 
- :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class FilteredBlobPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - tags = parse_tags(item.tags) - blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_upload_helpers.py deleted file mode 100644 index 985e731..0000000 --- a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_upload_helpers.py +++ /dev/null @@ -1,281 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceModifiedError, HttpResponseError - -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from .._shared.uploads_async import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from .._shared.encryption import generate_blob_encryption_data, encrypt_blob -from .._generated.models import ( - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) -from .._upload_helpers import _convert_mod_error, _any_conditions - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - - -async def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - immutability_policy = kwargs.pop('immutability_policy', None) - immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time - immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode - legal_hold = kwargs.pop('legal_hold', None) - - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return await client.upload( - body=data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - immutability_policy_expiry=immutability_policy_expiry, - immutability_policy_mode=immutability_policy_mode, - legal_hold=legal_hold, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - 
encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = await upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs - ) - else: - block_ids = await upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - headers=headers, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return await client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - immutability_policy_expiry=immutability_policy_expiry, - immutability_policy_mode=immutability_policy_mode, - legal_hold=legal_hold, - **kwargs) - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = await client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return await upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs) - - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/py.typed b/azure/multiapi/storagev2/blob/v2020_10_02/py.typed deleted file mode 100644 index e69de29..0000000 diff --git 
a/azure/multiapi/storagev2/blob/v2021_04_10/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/__init__.py deleted file mode 100644 index 58442ed..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/__init__.py +++ /dev/null @@ -1,239 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import os - -from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import -from ._version import VERSION -from ._blob_client import BlobClient -from ._container_client import ContainerClient -from ._blob_service_client import BlobServiceClient -from ._lease import BlobLeaseClient -from ._download import StorageStreamDownloader -from ._quick_query_helper import BlobQueryReader -from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.response_handlers import PartialBatchErrorException -from ._shared.models import( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode, - UserDelegationKey -) -from ._generated.models import ( - RehydratePriority, -) -from ._models import ( - BlobType, - BlockState, - StandardBlobTier, - PremiumPageBlobTier, - BlobImmutabilityPolicyMode, - SequenceNumberAction, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - ContainerProperties, - BlobProperties, - FilteredBlob, - LeaseProperties, - ContentSettings, - CopyProperties, - BlobBlock, - PageRange, - AccessPolicy, - ContainerSasPermissions, - BlobSasPermissions, - CustomerProvidedEncryptionKey, - ContainerEncryptionScope, - BlobQueryError, - DelimitedJsonDialect, - DelimitedTextDialect, - QuickQueryDialect, - ArrowDialect, - ArrowType, - ObjectReplicationPolicy, - ObjectReplicationRule, - ImmutabilityPolicy -) -from ._list_blobs_helper import BlobPrefix - -__version__ = VERSION - - -def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> Dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. 
- :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = client.download_blob(**kwargs) - stream.readinto(handle) - - -def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream. - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'ContainerClient', - 'BlobClient', - 'BlobType', - 'BlobLeaseClient', - 'StorageErrorCode', - 'UserDelegationKey', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'BlockState', - 'StandardBlobTier', - 'PremiumPageBlobTier', - 'SequenceNumberAction', - 'BlobImmutabilityPolicyMode', - 'ImmutabilityPolicy', - 'PublicAccess', - 'BlobAnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'ContainerProperties', - 'BlobProperties', - 'BlobPrefix', - 'FilteredBlob', - 'LeaseProperties', - 'ContentSettings', - 'CopyProperties', - 'BlobBlock', - 'PageRange', - 'AccessPolicy', - 'QuickQueryDialect', - 'ContainerSasPermissions', - 'BlobSasPermissions', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageStreamDownloader', - 'CustomerProvidedEncryptionKey', - 'RehydratePriority', - 'generate_account_sas', - 'generate_container_sas', - 'generate_blob_sas', - 'PartialBatchErrorException', - 'ContainerEncryptionScope', - 'BlobQueryError', - 'DelimitedJsonDialect', - 'DelimitedTextDialect', - 'ArrowDialect', - 'ArrowType', - 'BlobQueryReader', - 'ObjectReplicationPolicy', - 'ObjectReplicationRule' -] diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_blob_client.py b/azure/multiapi/storagev2/blob/v2021_04_10/_blob_client.py deleted file mode 100644 index 65f901f..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_blob_client.py +++ /dev/null @@ -1,4003 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
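# Usage sketch (illustrative only; the URL and SAS token are placeholders). The
# module-level helpers removed above, upload_blob_to_url and download_blob_from_url,
# wrap BlobClient for one-shot transfers:
from azure.storage.blob import upload_blob_to_url, download_blob_from_url

def one_shot_transfer(sas_blob_url: str) -> None:
    # Upload local bytes as a block blob, replacing any existing content.
    upload_blob_to_url(sas_blob_url, b"hello world", overwrite=True)
    # Download the same blob to a local file; with overwrite=False a ValueError
    # is raised if the destination file already exists.
    download_blob_from_url(sas_blob_url, "downloaded_blob.bin", overwrite=False)

# one_shot_transfer("https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>")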
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines,no-self-use -from functools import partial -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING, - TypeVar, Type) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore -import six -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError - -from ._shared import encode_base64 -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query, TransportWrapper -from ._shared.encryption import generate_blob_encryption_data -from ._shared.uploads import IterStreamer -from ._shared.request_handlers import ( - add_metadata_headers, get_length, read_length, - validate_and_format_range_headers) -from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized -from ._generated import AzureBlobStorage -from ._generated.models import ( # pylint: disable=unused-import - DeleteSnapshotsOptionType, - BlobHTTPHeaders, - BlockLookupList, - AppendPositionAccessConditions, - SequenceNumberAccessConditions, - QueryRequest, - CpkInfo) -from ._serialize import ( - get_modify_conditions, - get_source_conditions, - get_cpk_scope_info, - get_api_version, - serialize_blob_tags_header, - serialize_blob_tags, - serialize_query_format, get_access_conditions -) -from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags, \ - deserialize_pipeline_response_into_cls -from ._quick_query_helper import BlobQueryReader -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob, _any_conditions) -from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError, QuickQueryDialect, \ - DelimitedJsonDialect, DelimitedTextDialect -from ._download import StorageStreamDownloader -from ._lease import BlobLeaseClient - -if TYPE_CHECKING: - from datetime import datetime - from ._generated.models import BlockList - from ._models import ( # pylint: disable=unused-import - ContentSettings, - ImmutabilityPolicy, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - -ClassType = TypeVar("ClassType") - - -class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. 
- :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - if not (container_name and blob_name): - raise ValueError("Please specify a container name and blob name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - path_snapshot, sas_token = parse_query(parsed_url.query) - - self.container_name = container_name - self.blob_name = blob_name - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - # This parameter is used for the hierarchy traversal. Give precedence to credential. - self._raw_credential = credential if credential else sas_token - self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) - super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - quote(self.blob_name, safe='~/'), - self._query_str) - - def _encode_source_url(self, source_url): - parsed_source_url = urlparse(source_url) - source_scheme = parsed_source_url.scheme - source_hostname = parsed_source_url.netloc.rstrip('/') - source_path = unquote(parsed_source_url.path) - source_query = parsed_source_url.query - result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))] - if source_query: - result.append(source_query) - return '?'.join(result) - - @classmethod - def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): - # type: (Type[ClassType], str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> ClassType - """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name. - - :param str blob_url: - The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type blob_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. If specified, this will override - the snapshot in the url. - :returns: A Blob client. 
- :rtype: ~azure.storage.blob.BlobClient - """ - try: - if not blob_url.lower().startswith('http'): - blob_url = "https://" + blob_url - except AttributeError: - raise ValueError("Blob URL must be a string.") - parsed_url = urlparse(blob_url.rstrip('/')) - - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(blob_url)) - - account_path = "" - if ".core." in parsed_url.netloc: - # .core. is indicating non-customized url. Blob name with directory info can also be parsed. - path_blob = parsed_url.path.lstrip('/').split('/', 1) - elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: - path_blob = parsed_url.path.lstrip('/').split('/', 2) - account_path += '/' + path_blob[0] - else: - # for customized url. blob name that has directory info cannot be parsed. - path_blob = parsed_url.path.lstrip('/').split('/') - if len(path_blob) > 2: - account_path = "/" + "/".join(path_blob[:-2]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) - if not container_name or not blob_name: - raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.") - - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=path_snapshot, credential=credential, **kwargs - ) - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """Create BlobClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. - :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob client. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_blob] - :end-before: [END auth_from_connection_string_blob] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a connection string. 
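# Usage sketch (illustrative only; connection string and URL values are placeholders)
# of the two alternative constructors documented above, from_connection_string and
# from_blob_url:
from azure.storage.blob import BlobClient

def build_clients() -> tuple:
    # From a connection string; an explicit credential is optional when the
    # connection string already carries the account key.
    from_conn = BlobClient.from_connection_string(
        "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>;EndpointSuffix=core.windows.net",
        container_name="mycontainer",
        blob_name="myblob.txt",
    )
    # From a full blob URL, which may already include a SAS token and snapshot.
    from_url = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/mycontainer/myblob.txt?<sas-token>"
    )
    return from_conn, from_url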
- """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, blob_name=blob_name, - snapshot=snapshot, credential=credential, **kwargs - ) - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_blob_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - encryption_options = { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function, - } - if self.key_encryption_key is not None: - cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) - encryption_options['cek'] = cek - encryption_options['vector'] = iv - encryption_options['data'] = encryption_data - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - overwrite = kwargs.pop('overwrite', False) - max_concurrency = kwargs.pop('max_concurrency', 1) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - kwargs['cpk_info'] = cpk_info - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) - kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) - if content_settings: - kwargs['blob_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - 
kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['overwrite'] = overwrite - kwargs['headers'] = headers - kwargs['validate_content'] = validate_content - kwargs['blob_settings'] = self._config - kwargs['max_concurrency'] = max_concurrency - kwargs['encryption_options'] = encryption_options - - if blob_type == BlobType.BlockBlob: - kwargs['client'] = self._client.block_blob - kwargs['data'] = data - elif blob_type == BlobType.PageBlob: - kwargs['client'] = self._client.page_blob - elif blob_type == BlobType.AppendBlob: - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - kwargs['client'] = self._client.append_blob - else: - raise ValueError("Unsupported BlobType: {}".format(blob_type)) - return kwargs - - def _upload_blob_from_url_options(self, source_url, **kwargs): - # type: (...) -> Dict[str, Any] - tier = kwargs.pop('standard_blob_tier', None) - overwrite = kwargs.pop('overwrite', False) - content_settings = kwargs.pop('content_settings', None) - source_authorization = kwargs.pop('source_authorization', None) - if content_settings: - kwargs['blob_http_headers'] = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=None, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'copy_source_authorization': source_authorization, - 'content_length': 0, - 'copy_source_blob_properties': kwargs.pop('include_source_blob_properties', True), - 'source_content_md5': kwargs.pop('source_content_md5', None), - 'copy_source': source_url, - 'modified_access_conditions': get_modify_conditions(kwargs), - 'blob_tags_string': serialize_blob_tags_header(kwargs.pop('tags', None)), - 'cls': return_response_headers, - 'lease_access_conditions': get_access_conditions(kwargs.pop('destination_lease', None)), - 'tier': tier.value if tier else None, - 'source_modified_access_conditions': get_source_conditions(kwargs), - 'cpk_info': cpk_info, - 'cpk_scope_info': get_cpk_scope_info(kwargs) - } - options.update(kwargs) - if not overwrite and not _any_conditions(**options): # pylint: disable=protected-access - options['modified_access_conditions'].if_none_match = '*' - return options - - @distributed_trace - def upload_blob_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Dict[str, Any] - """ - Creates a new Block Blob where the content of the blob is read from a given URL. - The content of an existing blob is overwritten with the new blob. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. - :keyword bool include_source_blob_properties: - Indicates if properties from the source blob should be copied. Defaults to True. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :paramtype tags: dict(str, str) - :keyword bytearray source_content_md5: - Specify the md5 that is used to verify the integrity of the source bytes. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. 
If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._upload_blob_from_url_options( - source_url=self._encode_source_url(source_url), - **kwargs) - try: - return self._client.block_blob.put_blob_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_blob( # pylint: disable=too-many-locals - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. 
The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. 
- Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 12 - :caption: Upload a blob to the container. 
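# Usage sketch (illustrative names and settings only) for the upload_blob call
# documented above: stream a local file as a block blob with overwrite, metadata
# and content settings.
from azure.storage.blob import BlobClient, ContentSettings

def upload_local_file(blob_client: BlobClient, path: str) -> dict:
    with open(path, "rb") as data:
        return blob_client.upload_blob(
            data,
            overwrite=True,
            metadata={"source": "local"},
            content_settings=ContentSettings(content_type="application/octet-stream"),
            max_concurrency=2,
        )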
- """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return upload_page_blob(**options) - return upload_append_blob(**options) - - def _download_blob_options(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Service actually uses an end-range inclusive index - - validate_content = kwargs.pop('validate_content', False) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'clients': self._client, - 'config': self._config, - 'start_range': offset, - 'end_range': length, - 'version_id': kwargs.pop('version_id', None), - 'validate_content': validate_content, - 'encryption_options': { - 'required': self.require_encryption, - 'key': self.key_encryption_key, - 'resolver': self.key_resolver_function}, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': kwargs.pop('cls', None) or deserialize_blob_stream, - 'max_concurrency':kwargs.pop('max_concurrency', 1), - 'encoding': kwargs.pop('encoding', None), - 'timeout': kwargs.pop('timeout', None), - 'name': self.blob_name, - 'container': self.container_name} - options.update(kwargs) - return options - - @distributed_trace - def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 12 - :caption: Download a blob. 
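As a companion to the ``download_blob()`` documentation above, here is a hedged sketch of the three read patterns it mentions (``readall()``, ``readinto()`` and ``chunks()``); the connection string, file names, and byte range are illustrative placeholders and the import path assumes the track-2 vendored layout::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt")

    # Read the whole blob into memory.
    data = blob_client.download_blob(max_concurrency=2).readall()

    # Stream the content into a writable file object instead.
    with open("local_copy.bin", "wb") as handle:
        blob_client.download_blob().readinto(handle)

    # Or iterate chunk by chunk, optionally over a byte range (offset/length).
    total = 0
    for chunk in blob_client.download_blob(offset=0, length=4096).chunks():
        total += len(chunk)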
- """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - return StorageStreamDownloader(**options) - - def _quick_query_options(self, query_expression, - **kwargs): - # type: (str, **Any) -> Dict[str, Any] - delimiter = '\n' - input_format = kwargs.pop('blob_format', None) - if input_format == QuickQueryDialect.DelimitedJson: - input_format = DelimitedJsonDialect() - if input_format == QuickQueryDialect.DelimitedText: - input_format = DelimitedTextDialect() - input_parquet_format = input_format == "ParquetDialect" - if input_format and not input_parquet_format: - try: - delimiter = input_format.lineterminator - except AttributeError: - try: - delimiter = input_format.delimiter - except AttributeError: - raise ValueError("The Type of blob_format can only be DelimitedTextDialect or " - "DelimitedJsonDialect or ParquetDialect") - output_format = kwargs.pop('output_format', None) - if output_format == QuickQueryDialect.DelimitedJson: - output_format = DelimitedJsonDialect() - if output_format == QuickQueryDialect.DelimitedText: - output_format = DelimitedTextDialect() - if output_format: - if output_format == "ParquetDialect": - raise ValueError("ParquetDialect is invalid as an output format.") - try: - delimiter = output_format.lineterminator - except AttributeError: - try: - delimiter = output_format.delimiter - except AttributeError: - pass - else: - output_format = input_format if not input_parquet_format else None - query_request = QueryRequest( - expression=query_expression, - input_serialization=serialize_query_format(input_format), - output_serialization=serialize_query_format(output_format) - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo( - encryption_key=cpk.key_value, - encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm - ) - options = { - 'query_request': query_request, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'snapshot': self.snapshot, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized, - } - options.update(kwargs) - return options, delimiter - - @distributed_trace - def query_blob(self, query_expression, **kwargs): - # type: (str, **Any) -> BlobQueryReader - """Enables users to select/project on blob/or blob snapshot data by providing simple query expressions. - This operations returns a BlobQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - :keyword Callable[~azure.storage.blob.BlobQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword blob_format: - Optional. Defines the serialization of the data currently stored in the blob. The default is to - treat the blob data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum). 
- These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string - :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - or ~azure.storage.blob.QuickQueryDialect or str - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the blob (Parquet formats default to DelimitedTextDialect). - By providing an output format, the blob data will be reformatted according to that profile. - This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect. - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string - :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect - or list[~azure.storage.blob.ArrowDialect] or ~azure.storage.blob.QuickQueryDialect or str - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (BlobQueryReader) - :rtype: ~azure.storage.blob.BlobQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on blob/or blob snapshot data by providing simple query expressions. 
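The ``query_blob()`` docstring above describes the dialect keywords abstractly; the sketch below shows one plausible combination (delimited text in, JSON out). The dialect settings, query text, and error callback are illustrative assumptions, not values taken from this change::

    from azure.multiapi.storagev2.blob.v2021_08_06 import (
        BlobClient, DelimitedTextDialect, DelimitedJsonDialect)

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="data.csv")

    input_format = DelimitedTextDialect(
        delimiter=',', quotechar='"', lineterminator='\n', has_header=True)
    output_format = DelimitedJsonDialect(delimiter='\n')

    reader = blob_client.query_blob(
        "SELECT * from BlobStorage",
        blob_format=input_format,
        output_format=output_format,
        on_error=lambda error: print(error.error, error.description))

    # BlobQueryReader exposes readall()/readinto(), as noted above.
    print(reader.readall())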
- """ - errors = kwargs.pop("on_error", None) - error_cls = kwargs.pop("error_cls", BlobQueryError) - encoding = kwargs.pop("encoding", None) - options, delimiter = self._quick_query_options(query_expression, **kwargs) - try: - headers, raw_response_body = self._client.blob.query(**options) - except HttpResponseError as error: - process_storage_error(error) - return BlobQueryReader( - name=self.blob_name, - container=self.container_name, - errors=errors, - record_delimiter=delimiter, - encoding=encoding, - headers=headers, - response=raw_response_body, - error_cls=error_cls) - - @staticmethod - def _generic_delete_blob_options(delete_snapshots=None, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if delete_snapshots: - delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) - options = { - 'timeout': kwargs.pop('timeout', None), - 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs - 'delete_snapshots': delete_snapshots or None, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions} - options.update(kwargs) - return options - - def _delete_blob_options(self, delete_snapshots=None, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - if self.snapshot and delete_snapshots: - raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") - options = self._generic_delete_blob_options(delete_snapshots, **kwargs) - options['snapshot'] = self.snapshot - options['version_id'] = kwargs.pop('version_id', None) - options['blob_delete_type'] = kwargs.pop('blob_delete_type', None) - return options - - @distributed_trace - def delete_blob(self, delete_snapshots=None, **kwargs): - # type: (str, **Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 12 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - self._client.blob.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def undelete_blob(self, **kwargs): - # type: (**Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 8 - :caption: Undeleting a blob. - """ - try: - self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace() - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :kwarg str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - # Encrypted with CPK - except ResourceExistsError: - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def get_blob_properties(self, **kwargs): - # type: (**Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a blob. 
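A short sketch tying together ``exists()`` and ``get_blob_properties()`` from the docstrings above; the attribute names follow the ``BlobProperties`` model and the client setup is a placeholder assumed for illustration::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient

    blob_client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="hello.txt")

    if blob_client.exists():
        props = blob_client.get_blob_properties()
        # System properties and user-defined metadata, but not the blob content.
        print(props.name, props.container, props.blob_type, props.size)
        print(props.etag, props.last_modified, props.metadata)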
- """ - # TODO: extract this out as _get_blob_properties_options - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - cls_method = kwargs.pop('cls', None) - if cls_method: - kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) - blob_props = self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - def _set_http_headers_options(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - options = { - 'timeout': kwargs.pop('timeout', None), - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], **Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return self._client.blob.set_http_headers(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _set_blob_metadata_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return self._client.blob.set_metadata(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_immutability_policy(self, immutability_policy, **kwargs): - # type: (ImmutabilityPolicy, **Any) -> Dict[str, str] - """The Set Immutability Policy operation sets the immutability policy on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - return self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs) - - @distributed_trace - def delete_immutability_policy(self, **kwargs): - # type: (**Any) -> None - """The Delete Immutability Policy operation deletes the immutability policy on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - - self._client.blob.delete_immutability_policy(**kwargs) - - @distributed_trace - def set_legal_hold(self, legal_hold, **kwargs): - # type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]] - """The Set Legal Hold operation sets a legal hold on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :param bool legal_hold: - Specified if a legal hold should be set on the blob. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. 
- :rtype: Dict[str, Union[str, datetime, bool]] - """ - - return self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs) - - def _create_page_blob_options( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - sequence_number = kwargs.pop('sequence_number', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - immutability_policy = kwargs.pop('immutability_policy', None) - if immutability_policy: - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - - if premium_page_blob_tier: - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore - - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_content_length': size, - 'blob_sequence_number': sequence_number, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. 
- :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return self._client.page_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - blob_headers = None - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - immutability_policy = kwargs.pop('immutability_policy', None) - if immutability_policy: - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'content_length': 0, - 'blob_http_headers': blob_headers, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'blob_tags_string': blob_tags_string, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). 
- :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.append_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _create_snapshot_options(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'headers': headers} - options.update(kwargs) - return options - - @distributed_trace - def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
- Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 8 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return self._client.blob.create_snapshot(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - if 'source_lease' in kwargs: - source_lease = kwargs.pop('source_lease') - try: - headers['x-ms-source-lease-id'] = source_lease.id # type: str - except AttributeError: - headers['x-ms-source-lease-id'] = source_lease - - tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) - requires_sync = kwargs.pop('requires_sync', None) - encryption_scope_str = kwargs.pop('encryption_scope', None) - source_authorization = kwargs.pop('source_authorization', None) - - if not requires_sync and encryption_scope_str: - raise ValueError("Encryption_scope is only supported for sync copy, please specify requires_sync=True") - if source_authorization and incremental_copy: - raise ValueError("Source authorization tokens are not applicable for incremental copying.") - # - # TODO: refactor start_copy_from_url api in _blob_client.py. Call _generated/_blob_operations.py copy_from_url - # when requires_sync=True is set. - # Currently both sync copy and async copy are calling _generated/_blob_operations.py start_copy_from_url. - # As sync copy diverges more from async copy, more problem will surface. 
- if encryption_scope_str: - headers.update({'x-ms-encryption-scope': encryption_scope_str}) - - if requires_sync is True: - headers['x-ms-requires-sync'] = str(requires_sync) - if source_authorization: - headers['x-ms-copy-source-authorization'] = source_authorization - else: - if source_authorization: - raise ValueError("Source authorization tokens are only applicable for synchronous copy operations.") - timeout = kwargs.pop('timeout', None) - dest_mod_conditions = get_modify_conditions(kwargs) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - immutability_policy = kwargs.pop('immutability_policy', None) - if immutability_policy: - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - - options = { - 'copy_source': source_url, - 'seal_blob': kwargs.pop('seal_destination_blob', None), - 'timeout': timeout, - 'modified_access_conditions': dest_mod_conditions, - 'blob_tags_string': blob_tags_string, - 'headers': headers, - 'cls': return_response_headers, - } - if not incremental_copy: - source_mod_conditions = get_source_conditions(kwargs) - dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) - options['source_modified_access_conditions'] = source_mod_conditions - options['lease_access_conditions'] = dest_access_conditions - options['tier'] = tier.value if tier else None - options.update(kwargs) - return options - - @distributed_trace - def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. 
- If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. This option is only available when `incremental_copy` is - set to False and `requires_sync` is set to True. - - .. versionadded:: 12.9.0 - - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the sync copied blob. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.10.0 - - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, Union[str, ~datetime.datetime]] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_common.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Copy a blob from a URL. - """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return self._client.page_blob.copy_incremental(**options) - return self._client.blob.start_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _abort_copy_options(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - options = { - 'copy_id': copy_id, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID string, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 12 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - self._client.blob.abort_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
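A minimal usage sketch of the copy flow shown above (``start_copy_from_url`` followed, if needed, by ``abort_copy``). The ``conn_str`` variable and blob names are illustrative only; the import uses the upstream ``azure.storage.blob`` package, whose client surface the versioned modules vendored here mirror::

    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(conn_str, "mycontainer", "destblob")

    # Kick off a server-side copy from a public or SAS-authenticated source URL.
    copy = dest.start_copy_from_url(
        "https://myaccount.blob.core.windows.net/mycontainer/myblob")

    # The returned dict carries copy_id and copy_status; a pending copy can be aborted.
    if copy["copy_status"] == "pending":
        dest.abort_copy(copy["copy_id"])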
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace - def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
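A short sketch of ``acquire_lease`` as documented above, assuming a hypothetical ``conn_str`` connection string; the 15-second duration is an illustrative value within the allowed 15-60 second range::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(conn_str, "mycontainer", "myblob")

    # Acquire a finite lease, use it for a write operation, then release it.
    lease = blob.acquire_lease(lease_duration=15)
    try:
        blob.set_blob_metadata({"locked_by": "example"}, lease=lease)
    finally:
        lease.release()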
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - if self.snapshot and kwargs.get('version_id'): - raise ValueError("Snapshot and version_id cannot be set at the same time") - try: - self._client.blob.set_tier( - tier=standard_blob_tier, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def _stage_block_options( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_id = encode_base64(str(block_id)) - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'block_id': block_id, - 'content_length': length, - 'body': data, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
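A sketch of ``set_standard_blob_tier`` as documented above, assuming a block blob on a standard storage account and a hypothetical ``conn_str``; the tier can be passed as a string or a ``StandardBlobTier`` value::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(conn_str, "mycontainer", "myblob")

    # Move the blob to the Cool tier; rehydrate_priority only matters when leaving Archive.
    blob.set_standard_blob_tier("Cool")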
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return self._client.block_blob.stage_block(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _stage_block_from_url_options( - self, block_id, # type: str - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - source_authorization = kwargs.pop('source_authorization', None) - if source_length is not None and source_offset is None: - raise ValueError("Source offset value must not be None if length is set.") - if source_length is not None: - source_length = source_offset + source_length - 1 - block_id = encode_base64(str(block_id)) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - range_header = None - if source_offset is not None: - range_header, _ = validate_and_format_range_headers(source_offset, source_length) - - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'copy_source_authorization': source_authorization, - 'block_id': block_id, - 'content_length': 0, - 'source_url': source_url, - 'source_range': range_header, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - } - options.update(kwargs) - return options - - @distributed_trace - def stage_block_from_url( - self, block_id, # type: Union[str, int] - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param str source_url: The URL. 
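A sketch of staging blocks with ``stage_block`` as documented above, assuming a hypothetical ``conn_str``; block IDs are caller-chosen strings and must all be the same length for a given blob::

    from azure.storage.blob import BlobClient

    block_blob = BlobClient.from_connection_string(conn_str, "mycontainer", "blockblob")

    # Stage two uncommitted blocks; nothing is readable until a block list is committed.
    block_blob.stage_block(block_id="block-0001", data=b"hello ")
    block_blob.stage_block(block_id="block-0002", data=b"world")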
- :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - :returns: Blob property dict. - :rtype: dict[str, Any] - """ - options = self._stage_block_from_url_options( - block_id, - source_url=self._encode_source_url(source_url), - source_offset=source_offset, - source_length=source_length, - source_content_md5=source_content_md5, - **kwargs) - try: - return self._client.block_blob.stage_block_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_block_list_result(self, blocks): - # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] - committed = [] # type: List - uncommitted = [] # type: List - if blocks.committed_blocks: - committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access - if blocks.uncommitted_blocks: - uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access - return committed, uncommitted - - @distributed_trace - def get_block_list(self, block_list_type="committed", **kwargs): - # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] - """The Get Block List operation retrieves the list of blocks that have - been uploaded as part of a block blob. - - :param str block_list_type: - Specifies whether to return the list of committed - blocks, the list of uncommitted blocks, or both lists together. - Possible values include: 'committed', 'uncommitted', 'all' - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A tuple of two lists - committed and uncommitted blocks - :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - try: - blocks = self._client.block_blob.get_block_list( - list_type=block_list_type, - snapshot=self.snapshot, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return self._get_block_list_result(blocks) - - def _commit_block_list_options( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - for block in block_list: - try: - if block.state.value == 'committed': - block_lookup.committed.append(encode_base64(str(block.id))) - elif block.state.value == 'uncommitted': - block_lookup.uncommitted.append(encode_base64(str(block.id))) - else: - block_lookup.latest.append(encode_base64(str(block.id))) - except AttributeError: - block_lookup.latest.append(encode_base64(str(block))) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - blob_headers = None - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if content_settings: - blob_headers = BlobHTTPHeaders( - blob_cache_control=content_settings.cache_control, - blob_content_type=content_settings.content_type, - blob_content_md5=content_settings.content_md5, - blob_content_encoding=content_settings.content_encoding, - blob_content_language=content_settings.content_language, - blob_content_disposition=content_settings.content_disposition - ) - - validate_content = kwargs.pop('validate_content', False) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - immutability_policy = kwargs.pop('immutability_policy', None) - if immutability_policy: - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) - - options = { - 'blocks': block_lookup, - 'blob_http_headers': blob_headers, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'tier': tier.value if tier else None, - 'blob_tags_string': blob_tags_string, - 'headers': headers - } - options.update(kwargs) - return options - - @distributed_trace - def commit_block_list( # type: ignore - self, block_list, # type: List[BlobBlock] - content_settings=None, # type: 
Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Commit Block List operation writes a blob by specifying the list of - block IDs that make up the blob. - - :param list block_list: - List of Blockblobs. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. 
- :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._commit_block_list_options( - block_list, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.block_blob.commit_block_list(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): - # type: (Union[str, PremiumPageBlobTier], **Any) -> None - """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if premium_page_blob_tier is None: - raise ValueError("A PremiumPageBlobTier must be specified") - try: - self._client.blob.set_tier( - tier=premium_page_blob_tier, - timeout=kwargs.pop('timeout', None), - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def _set_blob_tags_options(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - tags = serialize_blob_tags(tags) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'tags': tags, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_blob_tags(self, tags=None, **kwargs): - # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] - """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. 
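Continuing the staging sketch above, ``get_block_list`` and ``commit_block_list`` (both documented above) assemble the staged blocks into the readable blob; ``block_blob`` is the client from the earlier sketch::

    from azure.storage.blob import BlobBlock

    # Inspect what has been staged but not yet committed.
    committed, uncommitted = block_blob.get_block_list("all")

    # Commit the blocks in the desired order; this makes the data readable.
    block_blob.commit_block_list([
        BlobBlock(block_id="block-0001"),
        BlobBlock(block_id="block-0002"),
    ])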
- Each call to this operation replaces all existing tags attached to the blob. To remove all - tags from the blob, call this operation with no tags set. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :type tags: dict(str, str) - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return self._client.blob.set_tags(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_blob_tags_options(self, **kwargs): - # type: (**Any) -> Dict[str, str] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'version_id': kwargs.pop('version_id', None), - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_headers_and_deserialized} - return options - - @distributed_trace - def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. 
- :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except HttpResponseError as error: - process_storage_error(error) - - def _get_page_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - if length is not None: - length = offset + length - 1 # Reformat to an inclusive range index - page_range, _ = validate_and_format_range_headers( - offset, length, start_range_required=False, end_range_required=False, align_to_page=True - ) - options = { - 'snapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': page_range} - if previous_snapshot_diff: - try: - options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore - except AttributeError: - try: - options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore - except TypeError: - options['prevsnapshot'] = previous_snapshot_diff - options.update(kwargs) - return options - - @distributed_trace - def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
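A sketch of the tag operations documented above (``set_blob_tags`` / ``get_blob_tags``), assuming a hypothetical ``conn_str`` and a service API version that supports blob tags::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(conn_str, "mycontainer", "myblob")

    # Replace the full tag set, then read it back as a plain dict.
    blob.set_blob_tags({"project": "demo", "stage": "test"})
    tags = blob.get_blob_tags()  # -> {"project": "demo", "stage": "test"}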
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element is the filled page ranges, the second element is the cleared page ranges. - :rtype: tuple(list(dict(str, str)), list(dict(str, str))) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = self._client.page_blob.get_page_ranges(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace - def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param str previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value.
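A sketch of ``get_page_ranges`` as documented above, assuming an existing page blob and a hypothetical ``conn_str``; filled and cleared ranges come back as dicts with 'start' and 'end' keys::

    from azure.storage.blob import BlobClient

    page_blob = BlobClient.from_connection_string(conn_str, "mycontainer", "mypageblob")

    # List the valid (filled) and cleared page ranges of the page blob.
    filled, cleared = page_blob.get_page_ranges()
    for page_range in filled:
        print(page_range["start"], page_range["end"])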
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = self._client.page_blob.get_page_ranges_diff(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if sequence_number_action is None: - raise ValueError("A sequence number action must be specified") - options = { - 'sequence_number_action': sequence_number_action, - 'timeout': kwargs.pop('timeout', None), - 'blob_sequence_number': sequence_number, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs): - # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return self._client.page_blob.update_sequence_number(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _resize_blob_options(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Any] - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if size is None: - raise ValueError("A content length must be specified for a Page Blob.") - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'blob_content_length': size, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def resize_blob(self, size, **kwargs): - # type: (int, **Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return self._client.page_blob.resize(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_page_options( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - if isinstance(page, six.text_type): - page = page.encode(kwargs.pop('encoding', 'UTF-8')) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - validate_content = kwargs.pop('validate_content', False) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': page[:length], - 'content_length': length, - 'transactional_content_md5': None, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. 
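A sketch of ``set_sequence_number`` and ``resize_blob`` as documented above, reusing the ``page_blob`` client from the page-ranges sketch; the new size must be 512-byte aligned and the action value is one of 'max', 'update' or 'increment'::

    # Set an explicit sequence number, then grow the page blob to 1 MiB.
    page_blob.set_sequence_number("update", 7)
    page_blob.resize_blob(1024 * 1024)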
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return self._client.page_blob.upload_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _upload_pages_from_url_options( # type: ignore - self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # TODO: extract the code to a method format_range - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - if source_offset is None or source_offset % 512 != 0: - raise ValueError("source_offset must be an integer that aligns with 512 page size") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) # inclusive end of the source range - - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - source_authorization = kwargs.pop('source_authorization', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - source_content_md5 = kwargs.pop('source_content_md5', None) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'copy_source_authorization': source_authorization, - 'source_url': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob.
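A sketch of ``upload_page`` as documented above, reusing the ``page_blob`` client from the earlier page-blob sketches; both offset and length must be multiples of 512::

    data = b"\x00" * 512

    # Write a single 512-byte page at the start of the page blob.
    page_blob.upload_page(data, offset=0, length=512)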
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _clear_page_options(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - seq_conditions = SequenceNumberAccessConditions( - if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), - if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), - if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) - ) - mod_conditions = get_modify_conditions(kwargs) - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 page size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 page size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'content_length': 0, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range, - 'lease_access_conditions': access_conditions, - 'sequence_number_access_conditions': seq_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def clear_page(self, offset, length, **kwargs): - # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. 
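As a point of reference for the 512-byte alignment rules described in these docstrings, a minimal usage sketch of the page blob operations (the connection string, container and blob names, and source URL are placeholders; the standard azure.storage.blob namespace is used here, which exposes the same client surface as these versioned modules)::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    page_blob = service.get_blob_client("my-container", "disk.vhd")
    page_blob.create_page_blob(size=1024)  # page blob size must be a multiple of 512

    # Both the destination and source ranges must be 512-byte aligned.
    page_blob.upload_page(b"\xff" * 512, offset=0, length=512)
    page_blob.upload_pages_from_url(
        "https://<account>.blob.core.windows.net/src/source.vhd?<sas>",
        offset=512, length=512, source_offset=0)
    page_blob.clear_page(offset=0, length=512)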
- - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return self._client.page_blob.clear_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _append_block_options( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if length == 0: - return {} - if isinstance(data, bytes): - data = data[:length] - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - validate_content = kwargs.pop('validate_content', False) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - options = { - 'body': data, - 'content_length': length, - 'timeout': kwargs.pop('timeout', None), - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'validate_content': validate_content, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return self._client.append_blob.append_block(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _append_block_from_url_options( # type: ignore - self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - # If end range is provided, start range must be provided - if source_length is not None and source_offset is None: - raise ValueError("source_offset should also be specified if source_length is specified") - # Format based on whether length is present - source_range = None - if source_length is not None: - end_range = source_offset + source_length - 1 - source_range = 'bytes={0}-{1}'.format(source_offset, end_range) - elif source_offset is not None: - source_range = "bytes={0}-".format(source_offset) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - maxsize_condition = kwargs.pop('maxsize_condition', None) - source_content_md5 = kwargs.pop('source_content_md5', None) - append_conditions = None - if maxsize_condition or appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - max_size=maxsize_condition, - append_position=appendpos_condition - ) - source_authorization = kwargs.pop('source_authorization', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - source_mod_conditions = get_source_conditions(kwargs) - cpk_scope_info = get_cpk_scope_info(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - options = { - 'copy_source_authorization': source_authorization, - 'source_url': copy_source_url, - 'content_length': 0, - 'source_range': source_range, - 'source_content_md5': source_content_md5, - 'transactional_content_md5': None, - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'cpk_scope_info': cpk_scope_info, - 'cpk_info': cpk_info, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - @distributed_trace - def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. 
If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. 
- :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return self._client.append_blob.append_block_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _seal_append_blob_options(self, **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - appendpos_condition = kwargs.pop('appendpos_condition', None) - append_conditions = None - if appendpos_condition is not None: - append_conditions = AppendPositionAccessConditions( - append_position=appendpos_condition - ) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - - options = { - 'timeout': kwargs.pop('timeout', None), - 'lease_access_conditions': access_conditions, - 'append_position_access_conditions': append_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return self._client.append_blob.seal(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_container_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> ContainerClient - """Get a client to interact with the blob's parent container. - - The container need not already exist. Defaults to current blob's credentials. - - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_client_from_blob_client] - :end-before: [END get_container_client_from_blob_client] - :language: python - :dedent: 8 - :caption: Get container client from blob object. - """ - from ._container_client import ContainerClient - if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2021_04_10/_blob_service_client.py deleted file mode 100644 index 6740dc3..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_blob_service_client.py +++ /dev/null @@ -1,740 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
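For orientation, a minimal sketch of the append blob flow covered by append_block, append_block_from_url and seal_append_blob; the connection string, names and source URL are placeholders, and the standard azure.storage.blob namespace is used, which exposes the same clients as these versioned modules::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    append_blob = service.get_blob_client("logs", "app.log")
    append_blob.create_append_blob()

    append_blob.append_block(b"first entry\n")
    append_blob.append_block_from_url(
        "https://<account>.blob.core.windows.net/src/extra.log?<sas>",
        source_offset=0, source_length=512)
    append_blob.seal_append_blob()  # blob becomes read-only; further appends fail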
-# -------------------------------------------------------------------------- - -import functools -import warnings -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING, - TypeVar) - - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._shared.models import LocationMode -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.parser import _to_utc_datetime -from ._shared.response_handlers import return_response_headers, process_storage_error, \ - parse_to_internal_user_delegation_key -from ._generated import AzureBlobStorage -from ._generated.models import StorageServiceProperties, KeyInfo -from ._container_client import ContainerClient -from ._blob_client import BlobClient -from ._models import ContainerPropertiesPaged -from ._list_blobs_helper import FilteredBlobPaged -from ._serialize import get_api_version -from ._deserialize import service_stats_deserialize, service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from ._shared.models import UserDelegationKey - from ._lease import BlobLeaseClient - from ._models import ( - ContainerProperties, - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - FilteredBlob - ) - -ClassType = TypeVar("ClassType") - - -class BlobServiceClient(StorageAccountHostsMixin): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. 
If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self._query_str, credential = self._format_query_string(sas_token, credential) - super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """Create BlobServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :returns: A Blob service client. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string] - :end-before: [END auth_from_connection_string] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 8 - :caption: Getting account information for the blob service. - """ - try: - return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_service_stats(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. 
Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 8 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 8 - :caption: Getting service properties for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. 
- :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 8 - :caption: Setting service properties for the blob service. - """ - if all(parameter is None for parameter in [ - analytics_logging, hour_metrics, minute_metrics, cors, - target_version, delete_retention_policy, static_website]): - raise ValueError("set_service_properties should be called with at least one parameter") - - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword bool include_system: - Flag specifying that system containers should be included. - .. versionadded:: 12.10.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 12 - :caption: Listing the containers in the blob service. 
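A minimal sketch of the container listing call described here, assuming a placeholder connection string (the standard azure.storage.blob namespace exposes the same client)::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    for container in service.list_containers(name_starts_with="logs-", include_metadata=True):
        print(container.name, container.metadata)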
- """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - include_system = kwargs.pop('include_system', None) - if include_system: - include.append("system") - - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> ItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace - def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 12 - :caption: Creating a container in the blob service. 
- """ - container = self.get_container_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace - def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 12 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace - def _rename_container(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str name: - The name of the container to rename. - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.ContainerClient - """ - renamed_container = self.get_container_client(new_name) - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - if new_name: - warnings.warn("`new_name` is no longer supported.", DeprecationWarning) - container = self.get_container_client(new_name or deleted_container_name) - try: - container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except HttpResponseError as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 8 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. 
- - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 12 - :caption: Getting the blob client to interact with a specific blob. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_container_client.py b/azure/multiapi/storagev2/blob/v2021_04_10/_container_client.py deleted file mode 100644 index b5cbd58..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_container_client.py +++ /dev/null @@ -1,1601 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
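For orientation, a minimal sketch tying together the service-level container operations above (create_container, get_blob_client, delete_container); the connection string, container name and blob name are placeholders::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    service.create_container("my-container", metadata={"Category": "test"})
    blob = service.get_blob_client("my-container", "hello.txt")
    blob.upload_blob(b"hello world", overwrite=True)
    service.delete_container("my-container")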
-# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, - TYPE_CHECKING, - TypeVar) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core import MatchConditions -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import HttpRequest - -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from ._generated import AzureBlobStorage -from ._generated.models import SignedIdentifier -from ._deserialize import deserialize_container_properties -from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobType, - FilteredBlob) -from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged, FilteredBlobPaged -from ._lease import BlobLeaseClient -from ._blob_client import BlobClient - -if TYPE_CHECKING: - from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports - from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports - from datetime import datetime - from ._models import ( # pylint: disable=unused-import - PublicAccess, - AccessPolicy, - ContentSettings, - StandardBlobTier, - PremiumPageBlobTier) - - -def _get_blob_name(blob): - """Return the blob name. - - :param blob: A blob string or BlobProperties - :rtype: str - """ - try: - return blob.get('name') - except AttributeError: - return blob - - -ClassType = TypeVar("ClassType") - - -class ContainerClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods - """A client to interact with a specific container, although that container - may not yet exist. - - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. 
- :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 8 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not container_name: - raise ValueError("Please specify a container name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - _, sas_token = parse_query(parsed_url.query) - self.container_name = container_name - # This parameter is used for the hierarchy traversal. Give precedence to credential. 
- self._raw_credential = credential if credential else sas_token - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) - self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - def _format_url(self, hostname): - container_name = self.container_name - if isinstance(container_name, six.text_type): - container_name = container_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(container_name), - self._query_str) - - @classmethod - def from_container_url(cls, container_url, credential=None, **kwargs): - # type: (Type[ClassType], str, Optional[Any], Any) -> ClassType - """Create ContainerClient from a container url. - - :param str container_url: - The full endpoint URL to the Container, including SAS token if used. This could be - either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. - :type container_url: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - """ - try: - if not container_url.lower().startswith('http'): - container_url = "https://" + container_url - except AttributeError: - raise ValueError("Container URL must be a string.") - parsed_url = urlparse(container_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(container_url)) - - container_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(container_path) > 1: - account_path = "/" + "/".join(container_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - container_name = unquote(container_path[-1]) - if not container_name: - raise ValueError("Invalid URL. Please provide a URL with a valid container name") - return cls(account_url, container_name=container_name, credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """Create ContainerClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param container_name: - The container name for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. 
- Credentials provided here will take precedence over those in the connection string. - :returns: A container client. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START auth_from_connection_string_container] - :end-before: [END auth_from_connection_string_container] - :language: python - :dedent: 8 - :caption: Creating the ContainerClient from a connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, container_name=container_name, credential=credential, **kwargs) - - @distributed_trace - def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 12 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - timeout = kwargs.pop('timeout', None) - headers.update(add_metadata_headers(metadata)) # type: ignore - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _rename_container(self, new_name, **kwargs): - # type: (str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
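# Illustrative sketch (not taken from the deleted module's samples): building the client from a
# connection string and creating the container. The connection string, container name and
# metadata values below are placeholders; the call shape mirrors the equivalent
# azure.storage.blob API that these docstrings reference.
from azure.storage.blob import ContainerClient

conn_str = "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>;EndpointSuffix=core.windows.net"
container = ContainerClient.from_connection_string(conn_str, container_name="mycontainer")
container.create_container(metadata={"category": "test"})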
- :rtype: ~azure.storage.blob.ContainerClient - """ - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container = ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 12 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. 
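# Illustrative sketch: guarding container deletion with a lease. The connection string and
# container name are placeholders; acquire_lease(-1) takes an infinite lease on the container,
# and delete_container then only succeeds when given that lease.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")
lease = container.acquire_lease(lease_duration=-1)
container.delete_container(lease=lease)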
- - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace - def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_container_properties(self, **kwargs): - # type: (Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. - :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a container exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - self._client.container.get_properties(**kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the container. 
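# Illustrative sketch: probing for the container, reading its properties and replacing its
# metadata. The SAS URL and metadata are placeholders; note that set_container_metadata
# overwrites all metadata currently on the container.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_container_url(
    "https://<account>.blob.core.windows.net/mycontainer?<sas-token>")
if container.exists():
    props = container.get_container_properties()
    print(props.name, props.last_modified)
    container.set_container_metadata(metadata={"category": "archive"})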
- """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> BlobServiceClient - """Get a client to interact with the container's parent service account. - - Defaults to current container's credentials. - - :returns: A BlobServiceClient. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service.py - :start-after: [START get_blob_service_client_from_container_client] - :end-before: [END get_blob_service_client_from_container_client] - :language: python - :dedent: 8 - :caption: Get blob service client from container object. - """ - from ._blob_service_client import BlobServiceClient - if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return BlobServiceClient( - "{}://{}".format(self.scheme, self.primary_hostname), - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, - _pipeline=_pipeline) - - @distributed_trace - def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 12 - :caption: Getting the access policy on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 12 - :caption: Setting access policy on the container. - """ - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - lease = kwargs.pop('lease', None) - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - return self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', - 'tags', 'versions', 'immutabilitypolicy', 'legalhold'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 8 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> ItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
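# Illustrative sketch: flat listing with a name prefix versus hierarchical listing by delimiter.
# The connection string, container name and prefix are placeholders.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")
for blob in container.list_blobs(name_starts_with="logs/", include=["metadata"]):
    print(blob.name, blob.size)
for item in container.walk_blobs(delimiter="/"):
    # Items may be BlobProperties or BlobPrefix placeholders for "virtual directories".
    print(item.name)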
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace - def find_blobs_by_tags( - self, filter_expression, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> ItemPaged[FilteredBlob] - """Returns a generator to list the blobs under the specified container whose tags - match the given search expression. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of FilteredBlob. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.filter_blobs, - timeout=timeout, - where=filter_expression, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace - def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. 
The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. 
If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 8 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace - def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. 
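# Illustrative sketch: uploading a local file as a block blob and overwriting any existing blob
# of the same name. File path, blob name and metadata are placeholders.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")
with open("./report.csv", "rb") as data:
    blob_client = container.upload_blob(
        name="reports/report.csv", data=data, overwrite=True, metadata={"source": "nightly-job"})
print(blob_client.url)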
- :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - blob_client.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace - def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. 
If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return blob_client.download_blob(offset=offset, length=length, **kwargs) - - def _generate_delete_blobs_subrequest_options( - self, snapshot=None, - delete_snapshots=None, - lease_access_conditions=None, - modified_access_conditions=None, - **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. 
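# Illustrative sketch: streaming a blob into a local file, then deleting the blob together with
# its snapshots. Blob name and local path are placeholders.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")
with open("./report_copy.csv", "wb") as handle:
    container.download_blob("reports/report.csv", max_concurrency=2).readinto(handle)
container.delete_blob("reports/report.csv", delete_snapshots="include")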
- """ - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_tags = None - if modified_access_conditions is not None: - if_tags = modified_access_conditions.if_tags - - # Construct parameters - timeout = kwargs.pop('timeout', None) - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - - # Construct headers - header_parameters = {} - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access - "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access - "lease_id", lease_id, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access - "if_unmodified_since", if_unmodified_since, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access - "if_none_match", if_none_match, 'str') - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_delete_blobs_options(self, - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - delete_snapshots = kwargs.pop('delete_snapshots', None) - if_modified_since = kwargs.pop('if_modified_since', None) - if_unmodified_since = kwargs.pop('if_unmodified_since', None) - if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "", - 'path': self.container_name, - 'restype': 'restype=container&' - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - snapshot=blob.get('snapshot'), - delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), 
- lease=blob.get('lease_id'), - if_modified_since=if_modified_since or blob.get('if_modified_since'), - if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), - etag=blob.get('etag'), - if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), - match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') - else None, - timeout=blob.get('timeout'), - ) - except AttributeError: - options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access - delete_snapshots=delete_snapshots, - if_modified_since=if_modified_since, - if_unmodified_since=if_unmodified_since, - if_tags_match_condition=if_tags_match_condition - ) - - query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) - - req = HttpRequest( - "DELETE", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def delete_blobs(self, *blobs, **kwargs): - # type: (...) -> Iterator[HttpResponse] - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - The maximum number of blobs that can be deleted in a single request is 256. - - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 8 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def _generate_set_tiers_subrequest_options( - self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs - ): - """This code is a copy from _generated. - - Once Autorest is able to provide request preparation this code should be removed. 
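# Illustrative sketch: batch-deleting several blobs in one request (the batch is capped at 256
# sub-requests). Blob names are placeholders; a per-blob dict may carry its own options such as
# 'delete_snapshots', and raise_on_any_failure=False returns per-blob responses instead of raising.
from azure.storage.blob import ContainerClient

container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")
responses = container.delete_blobs(
    "logs/2024-01-01.log",
    "logs/2024-01-02.log",
    {"name": "logs/2024-01-03.log", "delete_snapshots": "include"},
    raise_on_any_failure=False,
)
for response in responses:
    print(response.request.url, response.status_code)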
- """ - if not tier: - raise ValueError("A blob tier must be specified") - if snapshot and version_id: - raise ValueError("Snapshot and version_id cannot be set at the same time") - if_tags = kwargs.pop('if_tags', None) - - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "tier" - timeout = kwargs.pop('timeout', None) - # Construct parameters - query_parameters = {} - if snapshot is not None: - query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access - if version_id is not None: - query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access - if timeout is not None: - query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access - query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - - # Construct headers - header_parameters = {} - header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access - "rehydrate_priority", rehydrate_priority, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access - if if_tags is not None: - header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access - - return query_parameters, header_parameters - - def _generate_set_tiers_options(self, - blob_tier, # type: Optional[Union[str, StandardBlobTier, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - timeout = kwargs.pop('timeout', None) - raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) - rehydrate_priority = kwargs.pop('rehydrate_priority', None) - if_tags = kwargs.pop('if_tags_match_condition', None) - kwargs.update({'raise_on_any_failure': raise_on_any_failure, - 'sas': self._query_str.replace('?', '&'), - 'timeout': '&timeout=' + str(timeout) if timeout else "", - 'path': self.container_name, - 'restype': 'restype=container&' - }) - - reqs = [] - for blob in blobs: - blob_name = _get_blob_name(blob) - container_name = self.container_name - - try: - tier = blob_tier or blob.get('blob_tier') - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - tier=tier, - snapshot=blob.get('snapshot'), - version_id=blob.get('version_id'), - rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), - lease_access_conditions=blob.get('lease_id'), - if_tags=if_tags or blob.get('if_tags_match_condition'), - timeout=timeout or blob.get('timeout') - ) - except AttributeError: - query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( - blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) - - req = HttpRequest( - "PUT", - "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), - headers=header_parameters - ) - req.format_parameters(query_parameters) - reqs.append(req) - - return reqs, kwargs - - @distributed_trace - def set_standard_blob_tier_blobs( - self, - 
standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - The maximum number of blobs that can be updated in a single request is 256. - - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - snapshot: - key: "snapshost", value type: str - version id: - key: "version_id", value type: str - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - @distributed_trace - def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]] - *blobs, # type: List[Union[str, BlobProperties, dict]] - **kwargs - ): - # type: (...) -> Iterator[HttpResponse] - """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts. - - The maximum number of blobs that can be updated in a single request is 256. - - :param premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. 
note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. - :return: An iterator of responses, one for each blob in order - :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[str, BlobProperties] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 8 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_deserialize.py b/azure/multiapi/storagev2/blob/v2021_04_10/_deserialize.py deleted file mode 100644 index f7101e0..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_deserialize.py +++ /dev/null @@ -1,174 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
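A usage sketch of set_standard_blob_tier_blobs and get_blob_client above, assuming the public azure.storage.blob surface that the vendored package mirrors; account, container and blob names are placeholders::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")
    container = service.get_container_client("my-container")

    # The tier is a str or StandardBlobTier; pass None to take it from each blob dict instead.
    for response in container.set_standard_blob_tier_blobs("Archive", "a.txt", "b.txt"):
        print(response.status_code)

    # get_blob_client is purely local; the blob need not exist yet.
    blob = container.get_blob_client("a.txt")
    print(blob.url)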
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) -try: - from urllib.parse import unquote -except ImportError: - from urllib import unquote -from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties, ImmutabilityPolicy -from ._shared.models import get_enum_value -from ._shared.response_handlers import deserialize_metadata -from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ - StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule - -if TYPE_CHECKING: - from ._generated.models import PageList - - -def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers): - try: - deserialized_response = response.http_response - except AttributeError: - deserialized_response = response - return cls_method(deserialized_response, obj, headers) - - -def deserialize_blob_properties(response, obj, headers): - blob_properties = BlobProperties( - metadata=deserialize_metadata(response, obj, headers), - object_replication_source_properties=deserialize_ors_policies(response.http_response.headers), - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - blob_properties.content_settings.content_md5 = None - return blob_properties - - -def deserialize_ors_policies(policy_dictionary): - - if policy_dictionary is None: - return None - # For source blobs (blobs that have policy ids and rule ids applied to them), - # the header will be formatted as "x-ms-or-_: {Complete, Failed}". - # The value of this header is the status of the replication. 
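The comment above describes the object-replication status headers as ``x-ms-or-{policy}_{rule}: {Complete, Failed}``, and the loop that follows groups them per policy. A standalone sketch of that grouping, with made-up header values::

    headers = {
        "x-ms-or-policy-id": "policyA",
        "x-ms-or-policyA_rule1": "Complete",
        "x-ms-or-policyA_rule2": "Failed",
    }

    parsed = {}
    for name, status in headers.items():
        if "or-" not in name or name == "x-ms-or-policy-id":
            continue
        policy_id, rule_id = name.split("or-")[1].split("_")
        parsed.setdefault(policy_id, []).append((rule_id, status))

    print(parsed)   # {'policyA': [('rule1', 'Complete'), ('rule2', 'Failed')]}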
- or_policy_status_headers = {key: val for key, val in policy_dictionary.items() - if 'or-' in key and key != 'x-ms-or-policy-id'} - - parsed_result = {} - - for key, val in or_policy_status_headers.items(): - # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule - policy_and_rule_ids = key.split('or-')[1].split('_') - policy_id = policy_and_rule_ids[0] - rule_id = policy_and_rule_ids[1] - - # If we are seeing this policy for the first time, create a new list to store rule_id -> result - parsed_result[policy_id] = parsed_result.get(policy_id) or list() - parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val)) - - result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()] - - return result_list - - -def deserialize_blob_stream(response, obj, headers): - blob_properties = deserialize_blob_properties(response, obj, headers) - obj.properties = blob_properties - return response.http_response.location_mode, obj - - -def deserialize_container_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - container_properties = ContainerProperties( - metadata=metadata, - **headers - ) - return container_properties - - -def get_page_ranges_result(ranges): - # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - page_range = [] # type: ignore - clear_range = [] # type: List - if ranges.page_range: - page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore - if ranges.clear_range: - clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range] - return page_range, clear_range # type: ignore - - -def service_stats_deserialize(generated): - """Deserialize a ServiceStats objects into a dict. - """ - return { - 'geo_replication': { - 'status': generated.geo_replication.status, - 'last_sync_time': generated.geo_replication.last_sync_time, - } - } - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. 
- """ - return { - 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'target_version': generated.default_service_version, # pylint: disable=protected-access - 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access - 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access - } - - -def get_blob_properties_from_generated_code(generated): - blob = BlobProperties() - if generated.name.encoded: - blob.name = unquote(generated.name.content) - else: - blob.name = generated.name.content - blob_type = get_enum_value(generated.properties.blob_type) - blob.blob_type = BlobType(blob_type) if blob_type else None - blob.etag = generated.properties.etag - blob.deleted = generated.deleted - blob.snapshot = generated.snapshot - blob.is_append_blob_sealed = generated.properties.is_sealed - blob.metadata = generated.metadata.additional_properties if generated.metadata else {} - blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None - blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access - blob.last_modified = generated.properties.last_modified - blob.creation_time = generated.properties.creation_time - blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access - blob.size = generated.properties.content_length - blob.page_blob_sequence_number = generated.properties.blob_sequence_number - blob.server_encrypted = generated.properties.server_encrypted - blob.encryption_scope = generated.properties.encryption_scope - blob.deleted_time = generated.properties.deleted_time - blob.remaining_retention_days = generated.properties.remaining_retention_days - blob.blob_tier = generated.properties.access_tier - blob.rehydrate_priority = generated.properties.rehydrate_priority - blob.blob_tier_inferred = generated.properties.access_tier_inferred - blob.archive_status = generated.properties.archive_status - blob.blob_tier_change_time = generated.properties.access_tier_change_time - blob.version_id = generated.version_id - blob.is_current_version = generated.is_current_version - blob.tag_count = generated.properties.tag_count - blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access - blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) - blob.last_accessed_on = generated.properties.last_accessed_on - blob.immutability_policy = ImmutabilityPolicy._from_generated(generated) # pylint: disable=protected-access - blob.has_legal_hold = generated.properties.legal_hold - blob.has_versions_only = generated.has_versions_only - return blob - - -def parse_tags(generated_tags): - # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] - """Deserialize a list of BlobTag objects into a dict. 
- """ - if generated_tags: - tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} - return tag_dict - return None diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_download.py b/azure/multiapi/storagev2/blob/v2021_04_10/_download.py deleted file mode 100644 index c74af2f..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_download.py +++ /dev/null @@ -1,637 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -import threading -import time - -import warnings -from io import BytesIO -from typing import Iterator - -import requests -from azure.core.exceptions import HttpResponseError, ServiceResponseError - -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range -from ._deserialize import get_page_ranges_result - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - - content = b"".join(list(data)) - - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - non_empty_ranges=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - self.non_empty_ranges = non_empty_ranges - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - 
self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _do_optimize(self, given_range_start, given_range_end): - # If we have no page range list stored, then assume there's data everywhere for that page blob - # or it's a block blob or append blob - if self.non_empty_ranges is None: - return False - - for source_range in self.non_empty_ranges: - # Case 1: As the range list is sorted, if we've reached such a source_range - # we've checked all the appropriate source_range already and haven't found any overlapping. - # so the given range doesn't have any data and download optimization could be applied. - # given range: | | - # source range: | | - if given_range_end < source_range['start']: # pylint:disable=no-else-return - return True - # Case 2: the given range comes after source_range, continue checking. - # given range: | | - # source range: | | - elif source_range['end'] < given_range_start: - pass - # Case 3: source_range and given range overlap somehow, no need to optimize. - else: - return False - # Went through all src_ranges, but nothing overlapped. Optimization will be applied. - return True - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
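_do_optimize above lets the downloader skip empty regions of a sparse page blob and zero-fill them locally instead of issuing a GET. A standalone sketch of the same check (page ranges are sorted by start, as the comments note)::

    def range_is_empty(non_empty_ranges, start, end):
        if non_empty_ranges is None:          # block/append blob, or layout unknown
            return False
        for rng in non_empty_ranges:
            if end < rng["start"]:
                return True                   # all later ranges start even further right
            if rng["end"] < start:
                continue                      # this range lies before the requested one
            return False                      # overlap: real data must be downloaded
        return True

    ranges = [{"start": 0, "end": 511}, {"start": 4096, "end": 8191}]
    print(range_is_empty(ranges, 1024, 2047))   # True -> serve b"\x00" * chunk_size locally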
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - - retry_active = True - retry_total = 3 - while retry_active: - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - try: - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - retry_active = False - except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - time.sleep(1) - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get("modified_access_conditions"): - self.request_options["modified_access_conditions"].if_match = response.properties.etag - - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += self._iter_downloader.yield_chunk(chunk) - except StopIteration as e: - self._complete = True - if self._current_content: - return self._current_content - raise e - - # the current content from the first get is still there but smaller than chunk size - # therefore we want to make sure its also included - return self._get_chunk_data() - - next = __next__ # Python 2 compatibility. - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the blob. 
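_download_chunk above retries transient connection failures up to three times with a one-second pause before surfacing a ServiceResponseError; a simplified, standalone sketch of that pattern, using a builtin exception rather than the requests/azure-core types::

    import time

    def read_with_retries(fetch, retries=3, backoff=1.0):
        while True:
            try:
                return fetch()
            except ConnectionError:
                retries -= 1
                if retries <= 0:
                    raise
                time.sleep(backoff)

    print(read_with_retries(lambda: b"chunk-bytes"))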
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - retry_active = True - retry_total = 3 - while retry_active: - try: - location_mode, response = self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - try: - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - retry_active = False - except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - time.sleep(1) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - # according to the REST API documentation: - # in a highly fragmented page blob with a large number of writes, - # a Get Page Ranges request can fail due to an internal server timeout. - # thus, if the page blob is not sparse, it's ok for it to fail - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - if self._request_options.get("modified_access_conditions"): - self._request_options["modified_access_conditions"].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world.py - :start-after: [START download_a_blob_in_chunk] - :end-before: [END download_a_blob_in_chunk] - :language: python - :dedent: 12 - :caption: Download a blob using chunks(). 
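A usage sketch of the chunks() iterator documented above: stream a large blob to disk without buffering it all in memory (names are placeholders)::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="big.bin")

    with open("big.bin", "wb") as handle:
        for chunk in blob.download_blob().chunks():
            handle.write(chunk)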
- """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - def readall(self): - # type: () -> Union[bytes, str] - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. - - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." 
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor: - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/__init__.py deleted file mode 100644 index cc760e7..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/_azure_blob_storage.py deleted file mode 100644 index 578f658..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/_azure_blob_storage.py +++ /dev/null @@ -1,106 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. 
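A usage sketch of readinto() above: with max_concurrency greater than one the target stream must be seekable, because chunks may arrive out of order from worker threads (names are placeholders)::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="big.bin")

    with open("big.bin", "wb") as handle:            # a regular file handle is seekable
        bytes_read = blob.download_blob(max_concurrency=4).readinto(handle)
    print(bytes_read)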
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from ._configuration import AzureBlobStorageConfiguration -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from . import models - - -class AzureBlobStorage(object): - """AzureBlobStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.blob.operations.ServiceOperations - :ivar container: ContainerOperations operations - :vartype container: azure.storage.blob.operations.ContainerOperations - :ivar blob: BlobOperations operations - :vartype blob: azure.storage.blob.operations.BlobOperations - :ivar page_blob: PageBlobOperations operations - :vartype page_blob: azure.storage.blob.operations.PageBlobOperations - :ivar append_blob: AppendBlobOperations operations - :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations - :ivar block_blob: BlockBlobOperations operations - :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, http_request, **kwargs): - # type: (HttpRequest, Any) -> HttpResponse - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.pipeline.transport.HttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureBlobStorage - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/_configuration.py deleted file mode 100644 index e25c0cd..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/_configuration.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2021-04-10" - kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/__init__.py deleted file mode 100644 index 12cfcf6..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/_azure_blob_storage.py deleted file mode 100644 index 68f116a..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/_azure_blob_storage.py +++ /dev/null @@ -1,96 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest -from msrest import Deserializer, Serializer - -from ._configuration import AzureBlobStorageConfiguration -from .operations import ServiceOperations -from .operations import ContainerOperations -from .operations import BlobOperations -from .operations import PageBlobOperations -from .operations import AppendBlobOperations -from .operations import BlockBlobOperations -from .. import models - - -class AzureBlobStorage(object): - """AzureBlobStorage. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.blob.aio.operations.ServiceOperations - :ivar container: ContainerOperations operations - :vartype container: azure.storage.blob.aio.operations.ContainerOperations - :ivar blob: BlobOperations operations - :vartype blob: azure.storage.blob.aio.operations.BlobOperations - :ivar page_blob: PageBlobOperations operations - :vartype page_blob: azure.storage.blob.aio.operations.PageBlobOperations - :ivar append_blob: AppendBlobOperations operations - :vartype append_blob: azure.storage.blob.aio.operations.AppendBlobOperations - :ivar block_blob: BlockBlobOperations operations - :vartype block_blob: azure.storage.blob.aio.operations.BlockBlobOperations - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureBlobStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.container = ContainerOperations( - self._client, self._config, self._serialize, self._deserialize) - self.blob = BlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.page_blob = PageBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.append_blob = AppendBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - self.block_blob = BlockBlobOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureBlobStorage": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/_configuration.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/_configuration.py deleted file mode 100644 index bcf04ce..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/_configuration.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the desired operation. 
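The async client above supports async context management; a usage sketch through the public azure.storage.blob.aio surface, which the vendored aio package mirrors (names are placeholders)::

    import asyncio
    from azure.storage.blob.aio import BlobServiceClient

    async def main():
        async with BlobServiceClient.from_connection_string("<connection-string>") as service:
            container = service.get_container_client("my-container")
            downloader = await container.download_blob("notes.txt")
            print(len(await downloader.readall()))

    asyncio.run(main())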
- :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2021-04-10" - kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/__init__.py deleted file mode 100644 index 902269d..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_append_blob_operations.py deleted file mode 100644 index 4d18668..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_append_blob_operations.py +++ /dev/null @@ -1,726 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class AppendBlobOperations: - """AppendBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - content_length: int, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. 
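The Create Append Blob operation described above is surfaced on the public BlobClient as create_append_blob(); a minimal usage sketch (names and metadata values are placeholders)::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="app.log")

    blob.create_append_blob(metadata={"source": "sketch"})
    blob.append_block(b"first line\n")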
- :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "AppendBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = 
self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - 
header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def append_block( - self, - content_length: int, - body: IO, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Append Block operation commits a new block of data to the end of an existing append blob. - The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. 
- :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _max_size = None - _append_position = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "appendblock" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.append_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - 
header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def append_block_from_url( - self, - source_url: str, - content_length: int, - source_range: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - copy_source_authorization: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Append Block operation commits a new block of data to the end of an existing append blob - where the contents are read from a source url. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. 
- :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _max_size = None - _append_position = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "appendblock" - accept = "application/xml" - - # Construct URL - url = self.append_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # 
Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def seal( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on - version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _append_position = None - if append_position_access_conditions is not None: - _append_position = append_position_access_conditions.append_position - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - comp = "seal" - accept = "application/xml" - - # Construct URL - url = self.seal.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_blob_operations.py deleted file mode 100644 index 46a5ad2..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_blob_operations.py +++ /dev/null @@ -1,3008 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class BlobOperations: - """BlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def download( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - range_get_content_md5: Optional[bool] = None, - range_get_content_crc64: Optional[bool] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> IO: - """The Download operation reads or downloads a blob from the system, including its metadata and - properties. You can also call Download to read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param range_get_content_md5: When set to true and specified together with the Range, the - service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified together with the Range, the - service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 - MB in size. - :type range_get_content_crc64: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - 
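# A minimal usage sketch for the Download operation above, assuming the upstream
# azure.storage.blob.aio client surface (this package's versioned blob modules
# expose an equivalent wrapper); the connection string and container/blob names
# below are placeholders.
import asyncio
from azure.storage.blob.aio import BlobClient

async def download_example() -> bytes:
    # download_blob() drives the generated BlobOperations.download call shown
    # here, adding the lease/CPK/conditional headers when the matching keyword
    # arguments are supplied.
    blob = BlobClient.from_connection_string(
        "<connection-string>",          # placeholder
        container_name="my-container",  # placeholder
        blob_name="my-blob.txt",        # placeholder
    )
    async with blob:
        downloader = await blob.download_blob()
        return await downloader.readall()

# asyncio.run(download_example())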
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', 
response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', 
response.headers.get('x-ms-legal-hold')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_properties( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if 
request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) - response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) - response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def delete( - self, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, - request_id_parameter: Optional[str] = None, - blob_delete_type: Optional[str] = "Permanent", - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - permanently removed from the storage account. 
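The Get Properties operation that ends here is the same HEAD request the public async client issues from ``get_blob_properties``. A minimal sketch, assuming the public azure-storage-blob package is installed; the connection string, container and blob names are placeholders, not values from this repository::

    # Minimal sketch: the public async BlobClient wraps the generated
    # get_properties operation shown above.  All names below are placeholders.
    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def show_properties() -> None:
        blob = BlobClient.from_connection_string(
            conn_str="<storage-connection-string>",
            container_name="mycontainer",
            blob_name="myblob.txt",
        )
        async with blob:
            props = await blob.get_blob_properties()   # HEAD /{container}/{blob}
            print(props.etag)                          # ETag header
            print(props.last_modified)                 # Last-Modified header
            print(props.blob_type, props.size)         # x-ms-blob-type / Content-Length

    asyncio.run(show_properties())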
If the storage account's soft delete feature is - enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for the number of days - specified by the DeleteRetentionPolicy section of [Storage service properties] - (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's - data is permanently removed from the storage account. Note that you continue to be charged for - the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and - specify the "include=deleted" query parameter to discover which blobs and snapshots have been - soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other - operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code - of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the - following two options: include: Delete the base blob and all of its snapshots. only: Delete - only the blob's snapshots and not the blob itself. - :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to - permanently delete a blob if blob soft delete is enabled. - :type blob_delete_type: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if blob_delete_type is not None: - query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in 
[202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def undelete( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
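The delete/undelete pair documented above maps onto two one-line calls in the public SDK. A minimal sketch, assuming azure-storage-blob and a storage account with blob soft delete enabled; all names are placeholders::

    # Soft-delete round trip described in the delete/undelete docstrings above.
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        conn_str="<storage-connection-string>",
        container_name="mycontainer",
        blob_name="myblob.txt",
    )

    # DELETE with x-ms-delete-snapshots: include (base blob plus its snapshots).
    blob.delete_blob(delete_snapshots="include")

    # While the DeleteRetentionPolicy window is open, PUT ?comp=undelete restores it.
    blob.undelete_blob()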
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_expiry( - self, - expiry_options: Union[str, "_models.BlobExpiryOptions"], - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - expires_on: Optional[str] = None, - **kwargs: Any - ) -> None: - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. - :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/xml" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_http_headers( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_md5 = None - _blob_content_encoding = None - _blob_content_language = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _blob_content_disposition = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_disposition = blob_http_headers.blob_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - 
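For the Set HTTP Headers operation whose request is being built here, the public client gathers the individual x-ms-blob-content-* headers into a single ContentSettings object. A minimal sketch with placeholder values, assuming azure-storage-blob::

    # Sets the system properties listed in the Set HTTP Headers docstring above.
    from azure.storage.blob import BlobClient, ContentSettings

    blob = BlobClient.from_connection_string(
        conn_str="<storage-connection-string>",
        container_name="mycontainer",
        blob_name="report.json",
    )

    blob.set_http_headers(
        content_settings=ContentSettings(
            content_type="application/json",      # x-ms-blob-content-type
            cache_control="max-age=3600",         # x-ms-blob-cache-control
            content_disposition="inline",         # x-ms-blob-content-disposition
        )
    )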
path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_immutability_policy( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Set Immutability Policy operation sets the immutability policy on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "immutabilityPolicies" - accept = "application/xml" - - # Construct URL - url = self.set_immutability_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_immutability_policy.metadata = {'url': 
'/{containerName}/{blob}'} # type: ignore - - async def delete_immutability_policy( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Delete Immutability Policy operation deletes the immutability policy on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "immutabilityPolicies" - accept = "application/xml" - - # Construct URL - url = self.delete_immutability_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_legal_hold( - self, - legal_hold: bool, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Set Legal Hold operation sets a legal hold on the blob. - - :param legal_hold: Specified if a legal hold should be set on the blob. 
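The immutability-policy and legal-hold operations above have thin wrappers on the public BlobClient in recent azure-storage-blob releases. A rough sketch under that assumption (the container must support version-level immutability; every name and value below is a placeholder)::

    # Set, then remove, an unlocked immutability policy and toggle a legal hold.
    import datetime
    from azure.storage.blob import BlobClient, ImmutabilityPolicy

    blob = BlobClient.from_connection_string(
        conn_str="<storage-connection-string>",
        container_name="immutable-container",
        blob_name="record.csv",
    )

    policy = ImmutabilityPolicy(
        expiry_time=datetime.datetime.utcnow() + datetime.timedelta(days=7),
        policy_mode="Unlocked",                    # x-ms-immutability-policy-mode
    )
    blob.set_immutability_policy(immutability_policy=policy)

    blob.set_legal_hold(True)                      # x-ms-legal-hold: true
    blob.set_legal_hold(False)
    blob.delete_immutability_policy()              # allowed while the policy is unlocked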
- :type legal_hold: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "legalhold" - accept = "application/xml" - - # Construct URL - url = self.set_legal_hold.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_legal_hold.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The 
Set Blob Metadata operation sets user-defined metadata for the specified blob as one or - more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # 
Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - 
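The metadata headers being assembled above are what BlobClient.set_blob_metadata sends; the ModifiedAccessConditions group becomes the usual etag/match_condition keywords. A minimal sketch with placeholder names, assuming azure-storage-blob and azure-core::

    # Replace the blob's metadata only if it has not changed since it was read.
    from azure.core import MatchConditions
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        conn_str="<storage-connection-string>",
        container_name="mycontainer",
        blob_name="myblob.txt",
    )

    props = blob.get_blob_properties()
    blob.set_blob_metadata(
        metadata={"department": "finance", "reviewed": "true"},
        etag=props.etag,
        match_condition=MatchConditions.IfNotModified,   # sent as the If-Match header
    )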
if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 
'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
_if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - proposed_lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
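# --- Illustrative sketch (not part of the original diff) ---------------------
# renew_lease keeps an active lease alive and change_lease swaps in a caller
# proposed lease id (a GUID string). Hedged sketch against the high-level
# async client, assuming the same surface as azure-storage-blob.
import uuid
from azure.storage.blob.aio import BlobClient

async def renew_then_change(blob: BlobClient) -> str:
    lease = await blob.acquire_lease(lease_duration=30)
    # Renewing resets the remaining duration back to the full 30 seconds.
    await lease.renew()
    # change() hands the lock over to a new, caller-chosen lease id.
    new_id = str(uuid.uuid4())
    await lease.change(new_id)
    return new_id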
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
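# --- Illustrative sketch (not part of the original diff) ---------------------
# break_lease ends a lease without knowing its id; x-ms-lease-break-period
# (lease_break_period below) caps how long the current holder keeps the lock.
# Hedged sketch assuming the azure-storage-blob surface; names are placeholders.
from azure.storage.blob.aio import BlobClient, BlobLeaseClient

async def force_unlock(blob: BlobClient) -> int:
    lease_client = BlobLeaseClient(client=blob)
    # Ask the service to break the current lease within at most 10 seconds;
    # the return value is the number of seconds until the break completes.
    remaining = await lease_client.break_lease(lease_break_period=10)
    return remaining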
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def create_snapshot( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
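# --- Illustrative sketch (not part of the original diff) ---------------------
# create_snapshot produces a read-only, point-in-time copy of the blob and
# returns its snapshot timestamp (the x-ms-snapshot header). Hedged sketch
# over the high-level async client, assuming the azure-storage-blob surface.
from azure.storage.blob.aio import BlobClient

async def snapshot_blob(blob: BlobClient) -> str:
    result = await blob.create_snapshot(metadata={"purpose": "backup"})
    # The returned dict carries the snapshot value needed to address the
    # snapshot later (e.g. BlobClient(..., snapshot=result["snapshot"])).
    return result["snapshot"]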
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if 
_lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def start_copy_from_url( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - seal_blob: Optional[bool] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Start Copy From URL operation copies a blob or an internet resource to a new blob. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. Service version - 2019-12-12 and newer. - :type seal_blob: bool - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
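# --- Illustrative sketch (not part of the original diff) ---------------------
# start_copy_from_url only schedules the copy: x-ms-copy-status starts out
# "pending" and the destination has to be polled (or the copy aborted)
# separately. Hedged sketch, azure-storage-blob surface assumed.
import asyncio
from azure.storage.blob.aio import BlobClient

async def copy_and_wait(dest: BlobClient, source_url: str) -> None:
    started = await dest.start_copy_from_url(source_url)
    copy_id, status = started["copy_id"], started["copy_status"]
    while status == "pending":
        await asyncio.sleep(2)
        props = await dest.get_blob_properties()
        status = props.copy.status
    if status != "success":
        raise RuntimeError(f"copy {copy_id} ended with status {status!r}")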
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - accept = "application/xml" - - # Construct URL - url = self.start_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def copy_from_url( - self, - 
copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - copy_source_authorization: Optional[str] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - **kwargs: Any - ) -> None: - """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not - return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param source_modified_access_conditions: Parameter group. 
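# --- Illustrative sketch (not part of the original diff) ---------------------
# copy_from_url is the synchronous variant: the x-ms-requires-sync header is
# pinned to "true" and the request does not return until the copy has
# finished, so no polling loop is needed (the service limits sync copies to
# small block-blob sources). Hedged sketch, azure-storage-blob surface assumed.
from azure.storage.blob.aio import BlobClient

async def copy_small_blob(dest: BlobClient, source_url: str) -> None:
    # requires_sync=True maps onto the x-ms-requires-sync header set above.
    result = await dest.start_copy_from_url(source_url, requires_sync=True)
    assert result["copy_status"] == "success"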
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - _encryption_scope = None - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - x_ms_requires_sync = "true" - accept = "application/xml" - - # Construct URL - url = self.copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", 
_source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def abort_copy_from_url( - self, - copy_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a - destination blob with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
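# --- Illustrative sketch (not part of the original diff) ---------------------
# abort_copy_from_url cancels a pending asynchronous copy, leaving a
# zero-length destination blob with full metadata. Hedged sketch assuming the
# azure-storage-blob surface; the source URL is a placeholder.
from azure.storage.blob.aio import BlobClient

async def cancel_copy(dest: BlobClient) -> None:
    started = await dest.start_copy_from_url("https://<source-blob-url>")
    if started["copy_status"] == "pending":
        # The copy id echoes the x-ms-copy-id header from Start Copy From URL.
        await dest.abort_copy(started["copy_id"])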
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_tier( - self, - tier: Union[str, "_models.AccessTierRequired"], - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - timeout: Optional[int] = None, - rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - 
"""The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a - premium storage account and on a block blob in a blob storage account (locally redundant - storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not - update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tier" - accept = "application/xml" - - # Construct URL - url = self.set_tier.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = 
self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if response.status_code == 202: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_account_info( - self, - **kwargs: Any - ) -> None: - """Returns the sku name and account kind. 
- - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def query( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - query_request: Optional["_models.QueryRequest"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> IO: - """The Query operation enables users to select/project on blob data by providing simple query - expressions. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param query_request: the query request. - :type query_request: ~azure.storage.blob.models.QueryRequest - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "query" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.query.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if 
_if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', 
response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - 
response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_tags( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - snapshot: Optional[str] = None, - version_id: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> "_models.BlobTags": - """The Get Tags operation enables users to get the tags associated with a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlobTags, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - accept = "application/xml" - - # Construct URL - url = self.get_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlobTags', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def set_tags( - self, - 
timeout: Optional[int] = None, - version_id: Optional[str] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - tags: Optional["_models.BlobTags"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param tags: Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 
'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_block_blob_operations.py deleted file mode 100644 index c45f674..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_block_blob_operations.py +++ /dev/null @@ -1,1138 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class BlockBlobOperations: - """BlockBlobOperations async operations. 
- - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def upload( - self, - content_length: int, - body: IO, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Upload Block Blob operation updates the content of an existing block blob. Updating an - existing block blob overwrites any existing metadata on the blob. Partial updates are not - supported with Put Blob; the content of the existing blob is overwritten with the content of - the new blob. To perform a partial update of the content of a block blob, use the Put Block - List operation. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. 
- :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "BlockBlob" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - 
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = 
self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def put_blob_from_url( - self, - content_length: int, - copy_source: str, - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - blob_tags_string: Optional[str] = None, - copy_source_blob_properties: Optional[bool] = None, - copy_source_authorization: Optional[str] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - 
source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are - read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial - updates are not supported with Put Blob from URL; the content of an existing blob is - overwritten with the content of the new blob. To perform partial updates to a block blob’s - contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. - - :param content_length: The length of the request. - :type content_length: long - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param copy_source_blob_properties: Optional, default is true. Indicates if properties from - the source blob should be copied. - :type copy_source_blob_properties: bool - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - blob_type = "BlockBlob" - accept = "application/xml" - - # Construct URL - url = self.put_blob_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 
is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = 
self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if copy_source_blob_properties is not None: - header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def stage_block( - self, - block_id: str, - content_length: int, - body: IO, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - 
cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - **kwargs: Any - ) -> None: - """The Stage Block operation creates a new block to be committed as part of a blob. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "block" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.stage_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - 
header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def stage_block_from_url( - self, - block_id: str, - content_length: int, - source_url: str, - source_range: Optional[str] = None, - source_content_md5: Optional[bytearray] = None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] 
= None, - request_id_parameter: Optional[str] = None, - copy_source_authorization: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Stage Block operation creates a new block to be committed as part of a blob where the - contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Parameter group. 
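
For reference, the Stage Block and Stage Block From URL operations documented above are normally driven through the public ``BlobClient`` wrapper rather than this generated layer. A minimal usage sketch, assuming the public ``azure.storage.blob`` package, a placeholder connection string, and hypothetical container/blob names::

    import uuid

    from azure.storage.blob import BlobClient

    # Placeholder connection details -- substitute real values.
    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")

    # Stage a block from local bytes; all block IDs for a blob must be the same length.
    local_block_id = str(uuid.uuid4())
    blob.stage_block(block_id=local_block_id, data=b"hello world")

    # Stage another block whose contents the service reads from a source URL.
    url_block_id = str(uuid.uuid4())
    blob.stage_block_from_url(
        block_id=url_block_id,
        source_url="https://<account>.blob.core.windows.net/src/source-blob",
        source_offset=0,
        source_length=512,
    )

Staged blocks remain uncommitted until a subsequent Put Block List names them.
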
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "block" - accept = "application/xml" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", 
_encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def commit_block_list( - self, - blocks: "_models.BlockLookupList", - timeout: Optional[int] = None, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - metadata: Optional[str] = None, - tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: 
Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Commit Block List operation writes a blob by specifying the list of block IDs that make up - the blob. In order to be written as part of a blob, a block must have been successfully written - to the server in a prior Put Block operation. You can call Put Block List to update a blob by - uploading only those blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from the committed block list - or from the uncommitted block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: Blob Blocks. - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. 
- :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.commit_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - 
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_block_list( - self, - snapshot: Optional[str] = None, - list_type: Union[str, "_models.BlockListType"] = "committed", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> "_models.BlockList": - """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a - block blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param list_type: Specifies whether to return the list of committed blocks, the list of - uncommitted blocks, or both lists together. - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlockList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - accept = "application/xml" - - # Construct URL - url = self.get_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlockList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_container_operations.py deleted file mode 100644 index 18c70f5..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_container_operations.py +++ /dev/null @@ -1,1748 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ContainerOperations: - """ContainerOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - access: Optional[Union[str, "_models.PublicAccessType"]] = None, - request_id_parameter: Optional[str] = None, - container_cpk_scope_info: Optional["_models.ContainerCpkScopeInfo"] = None, - **kwargs: Any - ) -> None: - """creates a new container under the specified account. If the container with the same name - already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. 
If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_cpk_scope_info: Parameter group. - :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _default_encryption_scope = None - _prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - _default_encryption_scope = container_cpk_scope_info.default_encryption_scope - _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') - if _prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """returns all user-defined metadata and system properties for the specified container. The data - returned does not include the container's list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
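
Container creation and the properties call described here are usually issued through ``ContainerClient`` rather than the generated operations class. A minimal sketch, assuming the public ``azure.storage.blob`` package and a placeholder connection string::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string(
        "<connection-string>", container_name="mycontainer")

    # Fails with ResourceExistsError if a container with this name already exists.
    container.create_container(metadata={"project": "demo"})

    # System properties and user metadata; the container's blob listing is not included.
    props = container.get_container_properties()
    print(props.last_modified, props.public_access, props.metadata)
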
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) - 
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) - response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) - response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) - response_headers['x-ms-immutable-storage-with-versioning-enabled']=self._deserialize('bool', response.headers.get('x-ms-immutable-storage-with-versioning-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """operation marks the specified container for deletion. The container and any blobs contained - within it are later deleted during garbage collection. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not 
None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """operation sets one or more user-defined name-value pairs for the specified container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
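
The delete and set-metadata operations covered above correspond to ``ContainerClient.delete_container`` and ``ContainerClient.set_container_metadata``. A minimal sketch under the same placeholder assumptions::

    from azure.storage.blob import ContainerClient

    container = ContainerClient.from_connection_string(
        "<connection-string>", container_name="mycontainer")

    # Replaces (rather than merges) the container's user-defined metadata.
    container.set_container_metadata(metadata={"owner": "team-a"})

    # Marks the container for deletion; its blobs are removed later by garbage collection.
    container.delete_container()
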
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - restype = "container" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_access_policy( - self, - timeout: 
Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> List["_models.SignedIdentifier"]: - """gets the permissions for the specified container. The permissions indicate whether container - data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - async def set_access_policy( - self, - timeout: Optional[int] = None, - access: Optional[Union[str, "_models.PublicAccessType"]] = None, - request_id_parameter: Optional[str] = None, - container_acl: Optional[List["_models.SignedIdentifier"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """sets the permissions for the specified container. The permissions indicate whether blobs in a - container may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_acl: the acls for the container. - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
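
The access-policy pair described above (get/set container ACL) maps to ``get_container_access_policy`` and ``set_container_access_policy`` on ``ContainerClient``. A minimal sketch, assuming the public ``azure.storage.blob`` package, a placeholder connection string, and a hypothetical stored-access-policy name::

    from datetime import datetime, timedelta, timezone

    from azure.storage.blob import AccessPolicy, ContainerClient, ContainerSasPermissions

    container = ContainerClient.from_connection_string(
        "<connection-string>", container_name="mycontainer")

    # Read back the current public-access level and stored access policies.
    current = container.get_container_access_policy()
    print(current["public_access"], current["signed_identifiers"])

    # Install a single read-only stored access policy valid for one day.
    policy = AccessPolicy(
        permission=ContainerSasPermissions(read=True),
        start=datetime.now(timezone.utc),
        expiry=datetime.now(timezone.utc) + timedelta(days=1),
    )
    container.set_container_access_policy(signed_identifiers={"read-only-1": policy})
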
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - async def restore( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - deleted_container_name: Optional[str] = None, - deleted_container_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of - the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the - version of the deleted container to restore. 
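# A minimal sketch of container restore via the public async BlobServiceClient, assuming
# container soft delete is enabled on the account; names are placeholders.
from azure.storage.blob.aio import BlobServiceClient

async def restore_deleted_container(service: BlobServiceClient, name: str) -> None:
    # Deleted containers report .deleted and carry the version that undelete_container needs;
    # these map to the x-ms-deleted-container-name / x-ms-deleted-container-version headers here.
    async for container in service.list_containers(include_deleted=True):
        if container.name == name and container.deleted:
            await service.undelete_container(container.name, container.version)
            break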
- :type deleted_container_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{containerName}'} # type: ignore - - async def rename( - self, - source_container_name: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - source_lease_id: Optional[str] = None, - **kwargs: Any - ) -> None: - """Renames an existing container. - - :param source_container_name: Required. Specifies the name of the container to rename. - :type source_container_name: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{containerName}'} # type: ignore - - async def submit_batch( - self, - content_length: int, - multipart_content_type: str, - body: IO, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> IO: - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. 
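# A hedged sketch of the container batch endpoint through the public async ContainerClient:
# delete_blobs packs the sub-requests into one multipart/mixed POST with comp=batch
# (the blob names below are illustrative).
from azure.storage.blob.aio import ContainerClient

async def batch_delete(container: ContainerClient) -> None:
    await container.delete_blobs("logs/2021-01.txt", "logs/2021-02.txt", "logs/2021-03.txt")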
- - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/{containerName}'} # type: ignore - - async def filter_blobs( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - where: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - **kwargs: Any - ) -> "_models.FilterBlobSegment": - """The Filter Blobs operation enables callers to list blobs in a container whose tags match a - given search expression. Filter blobs searches within the given container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. 
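# A sketch of the container-scoped tag filter, assuming the public client exposes
# find_blobs_by_tags for this service version; the filter expression is illustrative.
from azure.storage.blob.aio import ContainerClient

async def blobs_tagged_alpha(container: ContainerClient) -> None:
    async for blob in container.find_blobs_by_tags("\"project\"='alpha'"):
        print(blob.name, blob.tags)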
- :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/{containerName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. 
The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
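# Lease lifecycle sketch against the public async ContainerClient (duration and names assumed):
# acquire_lease issues x-ms-lease-action: acquire and returns a BlobLeaseClient whose
# renew()/release() calls correspond to the renew_lease/release_lease operations in this file.
from azure.storage.blob.aio import ContainerClient

async def lease_roundtrip(container: ContainerClient) -> None:
    lease = await container.acquire_lease(lease_duration=15)  # 15-60 seconds, or -1 for infinite
    try:
        await lease.renew()
    finally:
        await lease.release()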
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - 
release_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
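# Break/change sketch (lease IDs are generated locally; values are placeholders): change()
# swaps the active lease to a proposed ID and break_lease() ends it after the given break
# period, matching the x-ms-lease-action: change/break headers constructed above.
import uuid
from azure.storage.blob.aio import ContainerClient

async def change_then_break(container: ContainerClient) -> None:
    lease = await container.acquire_lease(lease_duration=-1)
    await lease.change(proposed_lease_id=str(uuid.uuid4()))
    await lease.break_lease(lease_break_period=0)  # break immediately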
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - proposed_lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}'} # type: ignore - - async def list_blob_flat_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListBlobsFlatSegmentResponse": - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. 
- :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsFlatSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore - - async def list_blob_hierarchy_segment( - self, - delimiter: str, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListBlobsHierarchySegmentResponse": - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
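# Hierarchical listing sketch (delimiter and prefix assumed): walk_blobs surfaces the
# BlobPrefix placeholders this operation returns for "virtual directories".
from azure.storage.blob.aio import ContainerClient

async def walk_top_level(container: ContainerClient) -> None:
    async for item in container.walk_blobs(delimiter="/"):
        print(item.name)  # BlobPrefix names end with the delimiter; blobs are leaves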
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore - - async def get_account_info( - 
self, - **kwargs: Any - ) -> None: - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_page_blob_operations.py deleted file mode 100644 index 06f1755..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_page_blob_operations.py +++ /dev/null @@ -1,1424 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
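# NOTE (illustrative, not part of the generated client): the container-level
# list_blob_hierarchy_segment and get_account_info operations above are normally
# reached through the public async ContainerClient rather than called directly.
# A minimal sketch; the connection string, container name and prefix are
# placeholders, and under this multi-API package the same client is expected
# under a versioned namespace such as azure.multiapi.storagev2.blob.v2021_08_06.aio.
import asyncio
from azure.storage.blob.aio import ContainerClient

async def main() -> None:
    container = ContainerClient.from_connection_string("<connection-string>", "mycontainer")
    async with container:
        # Hierarchical ("virtual directory") listing, backed by list_blob_hierarchy_segment.
        async for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
            print(item.name)
        # Backed by get_account_info; returns a dict including sku_name and account_kind.
        print(await container.get_account_information())

asyncio.run(main())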
-# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class PageBlobOperations: - """PageBlobOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - content_length: int, - blob_content_length: int, - timeout: Optional[int] = None, - tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, - metadata: Optional[str] = None, - blob_sequence_number: Optional[int] = 0, - request_id_parameter: Optional[str] = None, - blob_tags_string: Optional[str] = None, - immutability_policy_expiry: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. 
- :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = 
modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "PageBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - 
header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def upload_pages( - self, - content_length: int, - body: IO, - transactional_content_md5: Optional[bytearray] = None, - transactional_content_crc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: 
Optional["_models.CpkScopeInfo"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Upload Pages operation writes a range of pages to a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "update" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - 
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def clear_pages( - self, - content_length: int, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
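# NOTE (illustrative sketch, not part of the generated client): the Create and
# Upload Pages plumbing above is normally driven through the public async
# BlobClient. Connection string, container and blob names are placeholders;
# page blob sizes, offsets and lengths must be 512-byte aligned.
import asyncio
from azure.storage.blob.aio import BlobClient

async def main() -> None:
    blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "disk.vhd")
    async with blob:
        await blob.create_page_blob(size=1024)                         # PageBlobOperations.create
        await blob.upload_page(b"\x01" * 512, offset=0, length=512)    # PageBlobOperations.upload_pages

asyncio.run(main())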
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "clear" - accept = "application/xml" - - # Construct URL - url = self.clear_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if 
_encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def upload_pages_from_url( - self, - source_url: str, - source_range: str, - content_length: int, - range: str, - source_content_md5: Optional[bytearray] = 
None, - source_contentcrc64: Optional[bytearray] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - copy_source_authorization: Optional[str] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Upload Pages operation writes a range of pages to a page blob where the contents are read - from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The length of this range - should match the ContentLength header and x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be written. The range should - be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
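# NOTE (illustrative sketch): the Clear Pages operation above surfaces as
# BlobClient.clear_page in the public API. Assumes `blob` is an open
# azure.storage.blob.aio.BlobClient pointing at an existing page blob.
from azure.storage.blob.aio import BlobClient

async def clear_first_page(blob: BlobClient) -> None:
    # Zeroes bytes 0-511; the request is sent with x-ms-page-write: clear.
    await blob.clear_page(offset=0, length=512)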
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "page" - page_write = "update" - accept = "application/xml" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - 
header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_page_ranges( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> "_models.PageList": - """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot - of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
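# NOTE (illustrative sketch): Upload Pages From URL surfaces as
# BlobClient.upload_pages_from_url. The source URL is an assumption here and
# must be readable by the service (for example, it carries a SAS token);
# source and destination ranges are 512-byte aligned.
from azure.storage.blob.aio import BlobClient

async def copy_first_page(dest: BlobClient, source_url: str) -> None:
    # Writes bytes 0-511 of the source into bytes 0-511 of the destination page blob.
    await dest.upload_pages_from_url(source_url, offset=0, length=512, source_offset=0)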
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def get_page_ranges_diff( - self, - snapshot: Optional[str] = None, - timeout: Optional[int] = None, - prevsnapshot: Optional[str] = None, - prev_snapshot_url: Optional[str] = None, - range: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> "_models.PageList": - """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that - were changed between target blob and previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a - DateTime value that specifies that the response will contain only pages that were changed - between target blob and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is - the older of the two. Note that incremental snapshots are currently supported only for blobs - created on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in service versions - 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The - response will only contain pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
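# NOTE (illustrative sketch): Get Page Ranges (and, when a previous snapshot is
# supplied, the Get Page Ranges Diff operation documented below) is reached
# through BlobClient.get_page_ranges, which returns (valid_ranges, cleared_ranges).
from typing import Optional
from azure.storage.blob.aio import BlobClient

async def show_page_ranges(blob: BlobClient, prev_snapshot: Optional[str] = None) -> None:
    ranges, cleared = await blob.get_page_ranges(previous_snapshot_diff=prev_snapshot)
    for r in ranges:
        print("valid:", r["start"], "-", r["end"])
    for r in cleared:
        print("cleared:", r["start"], "-", r["end"])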
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def resize( - self, - blob_content_length: int, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.resize.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
- header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def update_sequence_number( - self, - sequence_number_action: Union[str, "_models.SequenceNumberActionType"], - timeout: Optional[int] = None, - blob_sequence_number: Optional[int] = 0, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the - request. This property applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. - :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.update_sequence_number.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - async def copy_incremental( - self, - copy_source: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Copy Incremental operation copies a snapshot of the source page blob to a destination page - blob. The snapshot is copied such that only the differential changes between the previously - copied snapshot are transferred to the destination. The copied snapshots are complete copies of - the original snapshot and can be read or copied from as usual. This API is supported since REST - version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "incrementalcopy" - accept = "application/xml" - - # Construct URL - url = self.copy_incremental.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_service_operations.py deleted file mode 100644 index a6592a3..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,698 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def set_properties( - self, - storage_service_properties: "_models.StorageServiceProperties", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """Sets properties for a storage account's Blob service endpoint, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.StorageServiceProperties": - """gets the properties of a storage account's Blob service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - async def get_statistics( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.StorageServiceStats": - """Retrieves statistics related to replication for the Blob service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('StorageServiceStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/'} # type: ignore - - async def list_containers_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListContainersSegmentResponse": - """The List Containers Segment operation returns a list of the containers under the specified - account. 
- - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's metadata be returned as - part of the response body. - :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_containers_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not 
None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_containers_segment.metadata = {'url': '/'} # type: ignore - - async def get_user_delegation_key( - self, - key_info: "_models.KeyInfo", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.UserDelegationKey": - """Retrieves a user delegation key for the Blob service. This is only a valid operation when using - bearer token authentication. - - :param key_info: Key information. - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey, or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "userdelegationkey" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('UserDelegationKey', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} # type: ignore - - async def get_account_info( - self, - **kwargs: Any - ) -> None: - """Returns the sku name and account kind. 
- - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/'} # type: ignore - - async def submit_batch( - self, - content_length: int, - multipart_content_type: str, - body: IO, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> IO: - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/'} # type: ignore - - async def filter_blobs( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - where: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - **kwargs: Any - ) -> "_models.FilterBlobSegment": - """The Filter Blobs operation enables 
callers to list blobs across all containers whose tags match - a given search expression. Filter blobs searches across all containers within a storage - account but can be scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/__init__.py deleted file mode 100644 index e3307ac..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/__init__.py +++ /dev/null @@ -1,219 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
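# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the generated module above).
# The async page-blob operations removed in this diff (get_page_ranges_diff,
# resize, update_sequence_number, copy_incremental) all accept the
# parameter-group models re-exported by this package, e.g.
# LeaseAccessConditions and ModifiedAccessConditions. The snippet below is a
# minimal, hedged sketch of how those pieces fit together. The import path,
# the AzureBlobStorage client name, and the page_blob attribute are
# assumptions based on the retained v2021_08_06 layout and may differ between
# multi-API versions; in practice the public BlobClient wrappers construct
# and authenticate this generated client for you.

import asyncio

from azure.multiapi.storagev2.blob.v2021_08_06._generated import models  # assumed path
from azure.multiapi.storagev2.blob.v2021_08_06._generated.aio import AzureBlobStorage  # assumed path


async def show_page_ranges_diff(blob_url: str, prev_snapshot: str) -> None:
    # Assumed direct construction; blob_url should carry a SAS token or be
    # paired with a shared-key/bearer-token authentication policy.
    client = AzureBlobStorage(url=blob_url)
    try:
        # Parameter groups mirror the fields unpacked inside the operations
        # shown above (lease_id, if_modified_since, if_none_match, if_tags, ...).
        lease = models.LeaseAccessConditions(lease_id="<lease-id>")
        conditions = models.ModifiedAccessConditions(if_tags="\"env\" = 'prod'")

        # Only pages changed since prev_snapshot are returned (a PageList).
        diff = await client.page_blob.get_page_ranges_diff(
            prevsnapshot=prev_snapshot,
            range="bytes=0-1048575",
            lease_access_conditions=lease,
            modified_access_conditions=conditions,
        )
        for page_range in diff.page_range or []:
            print(page_range.start, page_range.end)

        # Resize follows the same pattern; the new length must be aligned to a
        # 512-byte boundary, as noted in the removed docstring above.
        await client.page_blob.resize(
            blob_content_length=4 * 1024 * 1024,
            lease_access_conditions=lease,
        )
    finally:
        await client.close()

# Example invocation (left commented out; requires a real blob URL and snapshot):
# asyncio.run(show_page_ranges_diff(
#     "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>",
#     "2021-01-01T00:00:00.0000000Z",
# ))
# ---------------------------------------------------------------------------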
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import AppendPositionAccessConditions - from ._models_py3 import ArrowConfiguration - from ._models_py3 import ArrowField - from ._models_py3 import BlobFlatListSegment - from ._models_py3 import BlobHTTPHeaders - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobMetadata - from ._models_py3 import BlobName - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import BlobTag - from ._models_py3 import BlobTags - from ._models_py3 import Block - from ._models_py3 import BlockList - from ._models_py3 import BlockLookupList - from ._models_py3 import ClearRange - from ._models_py3 import ContainerCpkScopeInfo - from ._models_py3 import ContainerItem - from ._models_py3 import ContainerProperties - from ._models_py3 import CorsRule - from ._models_py3 import CpkInfo - from ._models_py3 import CpkScopeInfo - from ._models_py3 import DelimitedTextConfiguration - from ._models_py3 import FilterBlobItem - from ._models_py3 import FilterBlobSegment - from ._models_py3 import GeoReplication - from ._models_py3 import JsonTextConfiguration - from ._models_py3 import KeyInfo - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsFlatSegmentResponse - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ListContainersSegmentResponse - from ._models_py3 import Logging - from ._models_py3 import Metrics - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import PageList - from ._models_py3 import PageRange - from ._models_py3 import QueryFormat - from ._models_py3 import QueryRequest - from ._models_py3 import QuerySerialization - from ._models_py3 import RetentionPolicy - from ._models_py3 import SequenceNumberAccessConditions - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StaticWebsite - from ._models_py3 import StorageError - from ._models_py3 import StorageServiceProperties - from ._models_py3 import StorageServiceStats - from ._models_py3 import UserDelegationKey -except (SyntaxError, ImportError): - from ._models import AccessPolicy # type: ignore - from ._models import AppendPositionAccessConditions # type: ignore - from ._models import ArrowConfiguration # type: ignore - from ._models import ArrowField # type: ignore - from ._models import BlobFlatListSegment # type: ignore - from ._models import BlobHTTPHeaders # type: ignore - from ._models import BlobHierarchyListSegment # type: ignore - from ._models import BlobItemInternal # type: ignore - from ._models import BlobMetadata # type: ignore - from ._models import BlobName # type: ignore - from ._models import BlobPrefix # type: ignore - from ._models import BlobPropertiesInternal # type: ignore - from ._models import BlobTag # type: ignore - from ._models import BlobTags # type: ignore - from ._models import Block # type: ignore - from ._models import BlockList # type: ignore - from ._models import BlockLookupList # type: ignore - from ._models import ClearRange # type: ignore - from ._models import ContainerCpkScopeInfo # type: ignore - from ._models import ContainerItem # type: ignore - from ._models import ContainerProperties # type: ignore - from ._models import CorsRule # type: ignore - from ._models import 
CpkInfo # type: ignore - from ._models import CpkScopeInfo # type: ignore - from ._models import DelimitedTextConfiguration # type: ignore - from ._models import FilterBlobItem # type: ignore - from ._models import FilterBlobSegment # type: ignore - from ._models import GeoReplication # type: ignore - from ._models import JsonTextConfiguration # type: ignore - from ._models import KeyInfo # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListBlobsFlatSegmentResponse # type: ignore - from ._models import ListBlobsHierarchySegmentResponse # type: ignore - from ._models import ListContainersSegmentResponse # type: ignore - from ._models import Logging # type: ignore - from ._models import Metrics # type: ignore - from ._models import ModifiedAccessConditions # type: ignore - from ._models import PageList # type: ignore - from ._models import PageRange # type: ignore - from ._models import QueryFormat # type: ignore - from ._models import QueryRequest # type: ignore - from ._models import QuerySerialization # type: ignore - from ._models import RetentionPolicy # type: ignore - from ._models import SequenceNumberAccessConditions # type: ignore - from ._models import SignedIdentifier # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StaticWebsite # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageServiceProperties # type: ignore - from ._models import StorageServiceStats # type: ignore - from ._models import UserDelegationKey # type: ignore - -from ._azure_blob_storage_enums import ( - AccessTier, - AccessTierOptional, - AccessTierRequired, - AccountKind, - ArchiveStatus, - BlobExpiryOptions, - BlobImmutabilityPolicyMode, - BlobType, - BlockListType, - CopyStatusType, - DeleteSnapshotsOptionType, - EncryptionAlgorithmType, - GeoReplicationStatusType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListBlobsIncludeItem, - ListContainersIncludeType, - PremiumPageBlobAccessTier, - PublicAccessType, - QueryFormatType, - RehydratePriority, - SequenceNumberActionType, - SkuName, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'AppendPositionAccessConditions', - 'ArrowConfiguration', - 'ArrowField', - 'BlobFlatListSegment', - 'BlobHTTPHeaders', - 'BlobHierarchyListSegment', - 'BlobItemInternal', - 'BlobMetadata', - 'BlobName', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'BlobTag', - 'BlobTags', - 'Block', - 'BlockList', - 'BlockLookupList', - 'ClearRange', - 'ContainerCpkScopeInfo', - 'ContainerItem', - 'ContainerProperties', - 'CorsRule', - 'CpkInfo', - 'CpkScopeInfo', - 'DelimitedTextConfiguration', - 'FilterBlobItem', - 'FilterBlobSegment', - 'GeoReplication', - 'JsonTextConfiguration', - 'KeyInfo', - 'LeaseAccessConditions', - 'ListBlobsFlatSegmentResponse', - 'ListBlobsHierarchySegmentResponse', - 'ListContainersSegmentResponse', - 'Logging', - 'Metrics', - 'ModifiedAccessConditions', - 'PageList', - 'PageRange', - 'QueryFormat', - 'QueryRequest', - 'QuerySerialization', - 'RetentionPolicy', - 'SequenceNumberAccessConditions', - 'SignedIdentifier', - 'SourceModifiedAccessConditions', - 'StaticWebsite', - 'StorageError', - 'StorageServiceProperties', - 'StorageServiceStats', - 'UserDelegationKey', - 'AccessTier', - 'AccessTierOptional', - 'AccessTierRequired', - 'AccountKind', - 'ArchiveStatus', - 'BlobExpiryOptions', - 'BlobImmutabilityPolicyMode', - 'BlobType', - 'BlockListType', - 'CopyStatusType', - 
'DeleteSnapshotsOptionType', - 'EncryptionAlgorithmType', - 'GeoReplicationStatusType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'ListBlobsIncludeItem', - 'ListContainersIncludeType', - 'PremiumPageBlobAccessTier', - 'PublicAccessType', - 'QueryFormatType', - 'RehydratePriority', - 'SequenceNumberActionType', - 'SkuName', - 'StorageErrorCode', -] diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_azure_blob_storage_enums.py deleted file mode 100644 index 3132545..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_azure_blob_storage_enums.py +++ /dev/null @@ -1,346 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccessTierOptional(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccessTierRequired(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - HOT = "Hot" - COOL = "Cool" - ARCHIVE = "Archive" - -class AccountKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - STORAGE = "Storage" - BLOB_STORAGE = "BlobStorage" - STORAGE_V2 = "StorageV2" - FILE_STORAGE = "FileStorage" - BLOCK_BLOB_STORAGE = "BlockBlobStorage" - -class ArchiveStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot" - REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool" - -class BlobExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NEVER_EXPIRE = "NeverExpire" - RELATIVE_TO_CREATION = "RelativeToCreation" - RELATIVE_TO_NOW = "RelativeToNow" - ABSOLUTE = "Absolute" - -class BlobImmutabilityPolicyMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - MUTABLE = "Mutable" - UNLOCKED = "Unlocked" - LOCKED = "Locked" - -class BlobType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - BLOCK_BLOB 
= "BlockBlob" - PAGE_BLOB = "PageBlob" - APPEND_BLOB = "AppendBlob" - -class BlockListType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COMMITTED = "committed" - UNCOMMITTED = "uncommitted" - ALL = "all" - -class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - PENDING = "pending" - SUCCESS = "success" - ABORTED = "aborted" - FAILED = "failed" - -class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INCLUDE = "include" - ONLY = "only" - -class EncryptionAlgorithmType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NONE = "None" - AES256 = "AES256" - -class GeoReplicationStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The status of the secondary location - """ - - LIVE = "live" - BOOTSTRAP = "bootstrap" - UNAVAILABLE = "unavailable" - -class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INFINITE = "infinite" - FIXED = "fixed" - -class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - AVAILABLE = "available" - LEASED = "leased" - EXPIRED = "expired" - BREAKING = "breaking" - BROKEN = "broken" - -class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LOCKED = "locked" - UNLOCKED = "unlocked" - -class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COPY = "copy" - DELETED = "deleted" - METADATA = "metadata" - SNAPSHOTS = "snapshots" - UNCOMMITTEDBLOBS = "uncommittedblobs" - VERSIONS = "versions" - TAGS = "tags" - IMMUTABILITYPOLICY = "immutabilitypolicy" - LEGALHOLD = "legalhold" - DELETEDWITHVERSIONS = "deletedwithversions" - -class ListContainersIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - METADATA = "metadata" - DELETED = "deleted" - SYSTEM = "system" - -class PremiumPageBlobAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - P4 = "P4" - P6 = "P6" - P10 = "P10" - P15 = "P15" - P20 = "P20" - P30 = "P30" - P40 = "P40" - P50 = "P50" - P60 = "P60" - P70 = "P70" - P80 = "P80" - -class PublicAccessType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - CONTAINER = "container" - BLOB = "blob" - -class QueryFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The quick query format type. - """ - - DELIMITED = "delimited" - JSON = "json" - ARROW = "arrow" - PARQUET = "parquet" - -class RehydratePriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """If an object is in rehydrate pending state then this header is returned with priority of - rehydrate. Valid values are High and Standard. 
- """ - - HIGH = "High" - STANDARD = "Standard" - -class SequenceNumberActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - MAX = "max" - UPDATE = "update" - INCREMENT = "increment" - -class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - STANDARD_LRS = "Standard_LRS" - STANDARD_GRS = "Standard_GRS" - STANDARD_RAGRS = "Standard_RAGRS" - STANDARD_ZRS = "Standard_ZRS" - PREMIUM_LRS = "Premium_LRS" - -class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Error codes returned by the service - """ - - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = "InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = "OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" - BLOB_ALREADY_EXISTS = "BlobAlreadyExists" - BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy" - BLOB_NOT_FOUND = "BlobNotFound" - BLOB_OVERWRITTEN = "BlobOverwritten" - BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" - BLOB_USES_CUSTOMER_SPECIFIED_ENCRYPTION = "BlobUsesCustomerSpecifiedEncryption" - BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" - BLOCK_LIST_TOO_LONG = "BlockListTooLong" - CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" - CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" - CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" - CONTAINER_BEING_DELETED = "ContainerBeingDeleted" - CONTAINER_DISABLED = "ContainerDisabled" - CONTAINER_NOT_FOUND = "ContainerNotFound" - CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" - COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = 
"CopyAcrossAccountsNotSupported" - COPY_ID_MISMATCH = "CopyIdMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" - INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" - INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" - INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" - INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" - INVALID_BLOB_TIER = "InvalidBlobTier" - INVALID_BLOB_TYPE = "InvalidBlobType" - INVALID_BLOCK_ID = "InvalidBlockId" - INVALID_BLOCK_LIST = "InvalidBlockList" - INVALID_OPERATION = "InvalidOperation" - INVALID_PAGE_RANGE = "InvalidPageRange" - INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" - INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" - INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" - LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" - LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" - LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" - LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" - LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" - LEASE_ID_MISSING = "LeaseIdMissing" - LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" - LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" - LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" - LEASE_LOST = "LeaseLost" - LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" - LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" - LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" - MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" - NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" - NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" - OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" - PENDING_COPY_OPERATION = "PendingCopyOperation" - PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" - PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" - PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" - SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" - SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" - SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" - SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded" - SNAPSHOTS_PRESENT = "SnapshotsPresent" - SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" - SYSTEM_IN_USE = "SystemInUse" - TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" - UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" - BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" - BLOB_ARCHIVED = "BlobArchived" - BLOB_NOT_ARCHIVED = "BlobNotArchived" - AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" - AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" - AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" - AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" - AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_models.py deleted file mode 100644 index abf1632..0000000 --- 
a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_models.py +++ /dev/null @@ -1,1995 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: the date-time the policy is active. - :type start: str - :param expiry: the date-time the policy expires. - :type expiry: str - :param permission: the permissions for the acl policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class AppendPositionAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param max_size: Optional conditional header. The max length in bytes permitted for the append - blob. If the Append Block operation would cause the blob to exceed that limit or if the blob - size is already greater than the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will succeed only if the append - position is equal to this number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': 'maxSize', 'type': 'long'}, - 'append_position': {'key': 'appendPosition', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = kwargs.get('max_size', None) - self.append_position = kwargs.get('append_position', None) - - -class ArrowConfiguration(msrest.serialization.Model): - """Groups the settings used for formatting the response if the response should be Arrow formatted. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. - :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = kwargs['schema'] - - -class ArrowField(msrest.serialization.Model): - """Groups settings regarding specific field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. 
- :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str'}, - 'name': {'key': 'Name', 'type': 'str'}, - 'precision': {'key': 'Precision', 'type': 'int'}, - 'scale': {'key': 'Scale', 'type': 'int'}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__( - self, - **kwargs - ): - super(ArrowField, self).__init__(**kwargs) - self.type = kwargs['type'] - self.name = kwargs.get('name', None) - self.precision = kwargs.get('precision', None) - self.scale = kwargs.get('scale', None) - - -class BlobFlatListSegment(msrest.serialization.Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = kwargs['blob_items'] - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs['blob_items'] - - -class BlobHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property - is stored with the blob and returned with a read request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If specified, this property - is stored with the blob and returned with a read request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not - validated, as the hashes for the individual blocks were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. 
- :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, - 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, - 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, - 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, - 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, - 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = kwargs.get('blob_cache_control', None) - self.blob_content_type = kwargs.get('blob_content_type', None) - self.blob_content_md5 = kwargs.get('blob_content_md5', None) - self.blob_content_encoding = kwargs.get('blob_content_encoding', None) - self.blob_content_language = kwargs.get('blob_content_language', None) - self.blob_content_disposition = kwargs.get('blob_content_disposition', None) - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: ~azure.storage.blob.models.BlobName - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. - :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: Blob tags. - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param has_versions_only: - :type has_versions_only: bool - :param object_replication_metadata: Dictionary of :code:``. - :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'BlobName'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, - 'has_versions_only': {'key': 'HasVersionsOnly', 'type': 'bool'}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs['deleted'] - self.snapshot = kwargs['snapshot'] - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - self.blob_tags = kwargs.get('blob_tags', None) - self.has_versions_only = kwargs.get('has_versions_only', None) - self.object_replication_metadata = kwargs.get('object_replication_metadata', None) - - -class BlobMetadata(msrest.serialization.Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. 
- :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}'}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__( - self, - **kwargs - ): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = kwargs.get('additional_properties', None) - self.encrypted = kwargs.get('encrypted', None) - - -class BlobName(msrest.serialization.Model): - """BlobName. - - :param encoded: Indicates if the blob name is encoded. - :type encoded: bool - :param content: The name of the blob. - :type content: str - """ - - _attribute_map = { - 'encoded': {'key': 'Encoded', 'type': 'bool', 'xml': {'name': 'Encoded', 'attr': True}}, - 'content': {'key': 'content', 'type': 'str', 'xml': {'text': True}}, - } - - def __init__( - self, - **kwargs - ): - super(BlobName, self).__init__(**kwargs) - self.encoded = kwargs.get('encoded', None) - self.content = kwargs.get('content', None) - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: ~azure.storage.blob.models.BlobName - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'BlobName'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs['name'] - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: "pending", "success", "aborted", "failed". 
- :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", - "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: "rehydrate-pending-to-hot", - "rehydrate-pending-to-cool". - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: If an object is in rehydrate pending state then this header is - returned with priority of rehydrate. Valid values are High and Standard. Possible values - include: "High", "Standard". - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - :param immutability_policy_expires_on: - :type immutability_policy_expires_on: ~datetime.datetime - :param immutability_policy_mode: Possible values include: "Mutable", "Unlocked", "Locked". 
- :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: - :type legal_hold: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'blob_type': {'key': 'BlobType', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'immutability_policy_expires_on': {'key': 'ImmutabilityPolicyUntilDate', 'type': 'rfc-1123'}, - 'immutability_policy_mode': {'key': 'ImmutabilityPolicyMode', 'type': 'str'}, - 'legal_hold': {'key': 'LegalHold', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - 
self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.blob_type = kwargs.get('blob_type', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_status = kwargs.get('copy_status', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.archive_status = kwargs.get('archive_status', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.rehydrate_priority = kwargs.get('rehydrate_priority', None) - self.last_accessed_on = kwargs.get('last_accessed_on', None) - self.immutability_policy_expires_on = kwargs.get('immutability_policy_expires_on', None) - self.immutability_policy_mode = kwargs.get('immutability_policy_mode', None) - self.legal_hold = kwargs.get('legal_hold', None) - - -class BlobTag(msrest.serialization.Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. - :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__( - self, - **kwargs - ): - super(BlobTag, self).__init__(**kwargs) - self.key = kwargs['key'] - self.value = kwargs['value'] - - -class BlobTags(msrest.serialization.Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__( - self, - **kwargs - ): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = kwargs['blob_tag_set'] - - -class Block(msrest.serialization.Model): - """Represents a single block in a block blob. It describes the block's ID and size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. 
- :type size: long - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'size': {'key': 'Size', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(Block, self).__init__(**kwargs) - self.name = kwargs['name'] - self.size = kwargs['size'] - - -class BlockList(msrest.serialization.Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - } - - def __init__( - self, - **kwargs - ): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = kwargs.get('committed_blocks', None) - self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) - - -class BlockLookupList(msrest.serialization.Model): - """BlockLookupList. - - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__( - self, - **kwargs - ): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = kwargs.get('committed', None) - self.uncommitted = kwargs.get('uncommitted', None) - self.latest = kwargs.get('latest', None) - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class ContainerCpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the - default encryption scope to set on the container and use for all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, - prevents any request from specifying a different encryption scope than the scope set on the - container. 
- :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - - -class ContainerItem(msrest.serialization.Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a container. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__( - self, - **kwargs - ): - super(ContainerItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - - -class ContainerProperties(msrest.serialization.Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: "container", "blob". - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param is_immutable_storage_with_versioning_enabled: Indicates if version level worm is enabled - on this container. 
- :type is_immutable_storage_with_versioning_enabled: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'public_access': {'key': 'PublicAccess', 'type': 'str'}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'is_immutable_storage_with_versioning_enabled': {'key': 'ImmutableStorageWithVersioningEnabled', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.public_access = kwargs.get('public_access', None) - self.has_immutability_policy = kwargs.get('has_immutability_policy', None) - self.has_legal_hold = kwargs.get('has_legal_hold', None) - self.default_encryption_scope = kwargs.get('default_encryption_scope', None) - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.is_immutable_storage_with_versioning_enabled = kwargs.get('is_immutable_storage_with_versioning_enabled', None) - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user age sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. 
The maximum amount time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs['allowed_origins'] - self.allowed_methods = kwargs['allowed_methods'] - self.allowed_headers = kwargs['allowed_headers'] - self.exposed_headers = kwargs['exposed_headers'] - self.max_age_in_seconds = kwargs['max_age_in_seconds'] - - -class CpkInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided - if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. Possible values include: "None", "AES256". - :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = kwargs.get('encryption_key', None) - self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) - self.encryption_algorithm = kwargs.get('encryption_algorithm', None) - - -class CpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the - encryption scope to use to encrypt the data provided in the request. If not specified, - encryption is performed with the default account encryption scope. For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = kwargs.get('encryption_scope', None) - - -class DelimitedTextConfiguration(msrest.serialization.Model): - """Groups the settings used for interpreting the blob data if the blob is delimited text formatted. - - :param column_separator: The string used to separate columns. - :type column_separator: str - :param field_quote: The string used to quote a specific field. 
- :type field_quote: str - :param record_separator: The string used to separate records. - :type record_separator: str - :param escape_char: The string used as an escape character. - :type escape_char: str - :param headers_present: Represents whether the data has headers. - :type headers_present: bool - """ - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = kwargs.get('column_separator', None) - self.field_quote = kwargs.get('field_quote', None) - self.record_separator = kwargs.get('record_separator', None) - self.escape_char = kwargs.get('escape_char', None) - self.headers_present = kwargs.get('headers_present', None) - - -class FilterBlobItem(msrest.serialization.Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tags: A set of tags. Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'container_name': {'key': 'ContainerName', 'type': 'str'}, - 'tags': {'key': 'Tags', 'type': 'BlobTags'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.container_name = kwargs['container_name'] - self.tags = kwargs.get('tags', None) - - -class FilterBlobSegment(msrest.serialization.Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. - :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'where': {'key': 'Where', 'type': 'str'}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.where = kwargs['where'] - self.blobs = kwargs['blobs'] - self.next_marker = kwargs.get('next_marker', None) - - -class GeoReplication(msrest.serialization.Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. 
- - :param status: Required. The status of the secondary location. Possible values include: "live", - "bootstrap", "unavailable". - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes - preceding this value are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for reads. - :type last_sync_time: ~datetime.datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str'}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, - } - - def __init__( - self, - **kwargs - ): - super(GeoReplication, self).__init__(**kwargs) - self.status = kwargs['status'] - self.last_sync_time = kwargs['last_sync_time'] - - -class JsonTextConfiguration(msrest.serialization.Model): - """json text configuration. - - :param record_separator: The string used to separate records. - :type record_separator: str - """ - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__( - self, - **kwargs - ): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = kwargs.get('record_separator', None) - - -class KeyInfo(msrest.serialization.Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC time. - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(KeyInfo, self).__init__(**kwargs) - self.start = kwargs['start'] - self.expiry = kwargs['expiry'] - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsFlatSegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ListContainersSegmentResponse(msrest.serialization.Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. 
- :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.container_items = kwargs['container_items'] - self.next_marker = kwargs.get('next_marker', None) - - -class Logging(msrest.serialization.Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. Indicates whether all delete requests should be logged. - :type delete: bool - :param read: Required. Indicates whether all read requests should be logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be logged. - :type write: bool - :param retention_policy: Required. the retention policy which determines how long the - associated data should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'delete': {'key': 'Delete', 'type': 'bool'}, - 'read': {'key': 'Read', 'type': 'bool'}, - 'write': {'key': 'Write', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Logging, self).__init__(**kwargs) - self.version = kwargs['version'] - self.delete = kwargs['delete'] - self.read = kwargs['read'] - self.write = kwargs['write'] - self.retention_policy = kwargs['retention_policy'] - - -class Metrics(msrest.serialization.Model): - """a summary of request statistics grouped by API in hour or minute aggregates for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: the retention policy which determines how long the associated data - should persist. 
- :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs['enabled'] - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - 'if_tags': {'key': 'ifTags', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_tags = kwargs.get('if_tags', None) - - -class PageList(msrest.serialization.Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, - } - - def __init__( - self, - **kwargs - ): - super(PageList, self).__init__(**kwargs) - self.page_range = kwargs.get('page_range', None) - self.clear_range = kwargs.get('clear_range', None) - - -class PageRange(msrest.serialization.Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__( - self, - **kwargs - ): - super(PageRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class QueryFormat(msrest.serialization.Model): - """QueryFormat. 
- - All required parameters must be populated in order to send to Azure. - - :param type: Required. The quick query format type. Possible values include: "delimited", - "json", "arrow", "parquet". - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: Groups the settings used for interpreting the blob data if - the blob is delimited text formatted. - :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: json text configuration. - :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: Groups the settings used for formatting the response if the - response should be Arrow formatted. - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - :param parquet_text_configuration: Any object. - :type parquet_text_configuration: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, - 'parquet_text_configuration': {'key': 'ParquetTextConfiguration', 'type': 'object'}, - } - - def __init__( - self, - **kwargs - ): - super(QueryFormat, self).__init__(**kwargs) - self.type = kwargs['type'] - self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) - self.json_text_configuration = kwargs.get('json_text_configuration', None) - self.arrow_configuration = kwargs.get('arrow_configuration', None) - self.parquet_text_configuration = kwargs.get('parquet_text_configuration', None) - - -class QueryRequest(msrest.serialization.Model): - """Groups the set of query request settings. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. The type of the provided query expression. Has constant value: - "SQL". - :vartype query_type: str - :param expression: Required. The query expression in SQL. The maximum size of the query - expression is 256KiB. - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__( - self, - **kwargs - ): - super(QueryRequest, self).__init__(**kwargs) - self.expression = kwargs['expression'] - self.input_serialization = kwargs.get('input_serialization', None) - self.output_serialization = kwargs.get('output_serialization', None) - - -class QuerySerialization(msrest.serialization.Model): - """QuerySerialization. 
- - All required parameters must be populated in order to send to Azure. - - :param format: Required. - :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat'}, - } - - def __init__( - self, - **kwargs - ): - super(QuerySerialization, self).__init__(**kwargs) - self.format = kwargs['format'] - - -class RetentionPolicy(msrest.serialization.Model): - """the retention policy which determines how long the associated data should persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the storage - service. - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :type days: int - :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage - account. - :type allow_permanent_delete: bool - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.days = kwargs.get('days', None) - self.allow_permanent_delete = kwargs.get('allow_permanent_delete', None) - - -class SequenceNumberAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a - blob if it has a sequence number less than or equal to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it - has a sequence number less than the specified. - :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it - has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, - 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, - 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None) - self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None) - self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None) - - -class SignedIdentifier(msrest.serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id. - :type id: str - :param access_policy: An Access policy. 
- :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__( - self, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs['id'] - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_tags = kwargs.get('source_if_tags', None) - - -class StaticWebsite(msrest.serialization.Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a static website. - :type enabled: bool - :param index_document: The default name of the index page under each directory. - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page. - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index page. 
- :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'index_document': {'key': 'IndexDocument', 'type': 'str'}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.index_document = kwargs.get('index_document', None) - self.error_document404_path = kwargs.get('error_document404_path', None) - self.default_index_document_path = kwargs.get('default_index_document_path', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage Service Properties. - - :param logging: Azure Analytics Logging settings. - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to the Blob service if - an incoming request's version is not specified. Possible values include version 2008-10-27 and - all more recent versions. - :type default_service_version: str - :param delete_retention_policy: the retention policy which determines how long the associated - data should persist. - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: The properties that enable an account to host a static website. - :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging'}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = kwargs.get('logging', None) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.default_service_version = kwargs.get('default_service_version', None) - self.delete_retention_policy = kwargs.get('delete_retention_policy', None) - self.static_website = kwargs.get('static_website', None) - - -class StorageServiceStats(msrest.serialization.Model): - """Stats for the storage service. - - :param geo_replication: Geo-Replication information for the Secondary Storage Service. 
- :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = kwargs.get('geo_replication', None) - - -class UserDelegationKey(msrest.serialization.Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. - :type signed_tid: str - :param signed_start: Required. The date-time the key is active. - :type signed_start: ~datetime.datetime - :param signed_expiry: Required. The date-time the key expires. - :type signed_expiry: ~datetime.datetime - :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the - key. - :type signed_service: str - :param signed_version: Required. The service version that created the key. - :type signed_version: str - :param value: Required. The key as a base64 string. - :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, - 'signed_service': {'key': 'SignedService', 'type': 'str'}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = kwargs['signed_oid'] - self.signed_tid = kwargs['signed_tid'] - self.signed_start = kwargs['signed_start'] - self.signed_expiry = kwargs['signed_expiry'] - self.signed_service = kwargs['signed_service'] - self.signed_version = kwargs['signed_version'] - self.value = kwargs['value'] diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_models_py3.py deleted file mode 100644 index e51ab85..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/models/_models_py3.py +++ /dev/null @@ -1,2265 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Any, Dict, List, Optional, Union - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - -from ._azure_blob_storage_enums import * - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: the date-time the policy is active. - :type start: str - :param expiry: the date-time the policy expires. 
- :type expiry: str - :param permission: the permissions for the acl policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - *, - start: Optional[str] = None, - expiry: Optional[str] = None, - permission: Optional[str] = None, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class AppendPositionAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param max_size: Optional conditional header. The max length in bytes permitted for the append - blob. If the Append Block operation would cause the blob to exceed that limit or if the blob - size is already greater than the value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type max_size: long - :param append_position: Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will succeed only if the append - position is equal to this number. If it is not, the request will fail with the - AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - :type append_position: long - """ - - _attribute_map = { - 'max_size': {'key': 'maxSize', 'type': 'long'}, - 'append_position': {'key': 'appendPosition', 'type': 'long'}, - } - - def __init__( - self, - *, - max_size: Optional[int] = None, - append_position: Optional[int] = None, - **kwargs - ): - super(AppendPositionAccessConditions, self).__init__(**kwargs) - self.max_size = max_size - self.append_position = append_position - - -class ArrowConfiguration(msrest.serialization.Model): - """Groups the settings used for formatting the response if the response should be Arrow formatted. - - All required parameters must be populated in order to send to Azure. - - :param schema: Required. - :type schema: list[~azure.storage.blob.models.ArrowField] - """ - - _validation = { - 'schema': {'required': True}, - } - - _attribute_map = { - 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, - } - _xml_map = { - 'name': 'ArrowConfiguration' - } - - def __init__( - self, - *, - schema: List["ArrowField"], - **kwargs - ): - super(ArrowConfiguration, self).__init__(**kwargs) - self.schema = schema - - -class ArrowField(msrest.serialization.Model): - """Groups settings regarding specific field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. 
- :type type: str - :param name: - :type name: str - :param precision: - :type precision: int - :param scale: - :type scale: int - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str'}, - 'name': {'key': 'Name', 'type': 'str'}, - 'precision': {'key': 'Precision', 'type': 'int'}, - 'scale': {'key': 'Scale', 'type': 'int'}, - } - _xml_map = { - 'name': 'Field' - } - - def __init__( - self, - *, - type: str, - name: Optional[str] = None, - precision: Optional[int] = None, - scale: Optional[int] = None, - **kwargs - ): - super(ArrowField, self).__init__(**kwargs) - self.type = type - self.name = name - self.precision = precision - self.scale = scale - - -class BlobFlatListSegment(msrest.serialization.Model): - """BlobFlatListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - **kwargs - ): - super(BlobFlatListSegment, self).__init__(**kwargs) - self.blob_items = blob_items - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - blob_prefixes: Optional[List["BlobPrefix"]] = None, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property - is stored with the blob and returned with a read request. - :type blob_cache_control: str - :param blob_content_type: Optional. Sets the blob's content type. If specified, this property - is stored with the blob and returned with a read request. - :type blob_content_type: str - :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not - validated, as the hashes for the individual blocks were validated when each was uploaded. - :type blob_content_md5: bytearray - :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_encoding: str - :param blob_content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :type blob_content_language: str - :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. 
- :type blob_content_disposition: str - """ - - _attribute_map = { - 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, - 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, - 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, - 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, - 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, - 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - blob_cache_control: Optional[str] = None, - blob_content_type: Optional[str] = None, - blob_content_md5: Optional[bytearray] = None, - blob_content_encoding: Optional[str] = None, - blob_content_language: Optional[str] = None, - blob_content_disposition: Optional[str] = None, - **kwargs - ): - super(BlobHTTPHeaders, self).__init__(**kwargs) - self.blob_cache_control = blob_cache_control - self.blob_content_type = blob_content_type - self.blob_content_md5 = blob_content_md5 - self.blob_content_encoding = blob_content_encoding - self.blob_content_language = blob_content_language - self.blob_content_disposition = blob_content_disposition - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: ~azure.storage.blob.models.BlobName - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. - :type properties: ~azure.storage.blob.models.BlobPropertiesInternal - :param metadata: - :type metadata: ~azure.storage.blob.models.BlobMetadata - :param blob_tags: Blob tags. - :type blob_tags: ~azure.storage.blob.models.BlobTags - :param has_versions_only: - :type has_versions_only: bool - :param object_replication_metadata: Dictionary of :code:``. 
- :type object_replication_metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'BlobName'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, - 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, - 'has_versions_only': {'key': 'HasVersionsOnly', 'type': 'bool'}, - 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: "BlobName", - deleted: bool, - snapshot: str, - properties: "BlobPropertiesInternal", - version_id: Optional[str] = None, - is_current_version: Optional[bool] = None, - metadata: Optional["BlobMetadata"] = None, - blob_tags: Optional["BlobTags"] = None, - has_versions_only: Optional[bool] = None, - object_replication_metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.metadata = metadata - self.blob_tags = blob_tags - self.has_versions_only = has_versions_only - self.object_replication_metadata = object_replication_metadata - - -class BlobMetadata(msrest.serialization.Model): - """BlobMetadata. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, str] - :param encrypted: - :type encrypted: str - """ - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{str}'}, - 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, - } - _xml_map = { - 'name': 'Metadata' - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, str]] = None, - encrypted: Optional[str] = None, - **kwargs - ): - super(BlobMetadata, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.encrypted = encrypted - - -class BlobName(msrest.serialization.Model): - """BlobName. - - :param encoded: Indicates if the blob name is encoded. - :type encoded: bool - :param content: The name of the blob. - :type content: str - """ - - _attribute_map = { - 'encoded': {'key': 'Encoded', 'type': 'bool', 'xml': {'name': 'Encoded', 'attr': True}}, - 'content': {'key': 'content', 'type': 'str', 'xml': {'text': True}}, - } - - def __init__( - self, - *, - encoded: Optional[bool] = None, - content: Optional[str] = None, - **kwargs - ): - super(BlobName, self).__init__(**kwargs) - self.encoded = encoded - self.content = content - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. 
- :type name: ~azure.storage.blob.models.BlobName - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'BlobName'}, - } - - def __init__( - self, - *, - name: "BlobName", - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". - :type blob_type: str or ~azure.storage.blob.models.BlobType - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param copy_id: - :type copy_id: str - :param copy_status: Possible values include: "pending", "success", "aborted", "failed". - :type copy_status: str or ~azure.storage.blob.models.CopyStatusType - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", - "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". - :type access_tier: str or ~azure.storage.blob.models.AccessTier - :param access_tier_inferred: - :type access_tier_inferred: bool - :param archive_status: Possible values include: "rehydrate-pending-to-hot", - "rehydrate-pending-to-cool". - :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param rehydrate_priority: If an object is in rehydrate pending state then this header is - returned with priority of rehydrate. 
Valid values are High and Standard. Possible values - include: "High", "Standard". - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - :param immutability_policy_expires_on: - :type immutability_policy_expires_on: ~datetime.datetime - :param immutability_policy_mode: Possible values include: "Mutable", "Unlocked", "Locked". - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: - :type legal_hold: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'blob_type': {'key': 'BlobType', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'immutability_policy_expires_on': {'key': 'ImmutabilityPolicyUntilDate', 'type': 'rfc-1123'}, - 'immutability_policy_mode': {'key': 'ImmutabilityPolicyMode', 'type': 'str'}, - 'legal_hold': {'key': 'LegalHold', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - creation_time: Optional[datetime.datetime] = None, - content_length: Optional[int] = None, - content_type: Optional[str] = None, - 
content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_md5: Optional[bytearray] = None, - content_disposition: Optional[str] = None, - cache_control: Optional[str] = None, - blob_sequence_number: Optional[int] = None, - blob_type: Optional[Union[str, "BlobType"]] = None, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - copy_id: Optional[str] = None, - copy_status: Optional[Union[str, "CopyStatusType"]] = None, - copy_source: Optional[str] = None, - copy_progress: Optional[str] = None, - copy_completion_time: Optional[datetime.datetime] = None, - copy_status_description: Optional[str] = None, - server_encrypted: Optional[bool] = None, - incremental_copy: Optional[bool] = None, - destination_snapshot: Optional[str] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier: Optional[Union[str, "AccessTier"]] = None, - access_tier_inferred: Optional[bool] = None, - archive_status: Optional[Union[str, "ArchiveStatus"]] = None, - customer_provided_key_sha256: Optional[str] = None, - encryption_scope: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - tag_count: Optional[int] = None, - expires_on: Optional[datetime.datetime] = None, - is_sealed: Optional[bool] = None, - rehydrate_priority: Optional[Union[str, "RehydratePriority"]] = None, - last_accessed_on: Optional[datetime.datetime] = None, - immutability_policy_expires_on: Optional[datetime.datetime] = None, - immutability_policy_mode: Optional[Union[str, "BlobImmutabilityPolicyMode"]] = None, - legal_hold: Optional[bool] = None, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.blob_type = blob_type - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.copy_id = copy_id - self.copy_status = copy_status - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_inferred = access_tier_inferred - self.archive_status = archive_status - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.rehydrate_priority = rehydrate_priority - self.last_accessed_on = last_accessed_on - self.immutability_policy_expires_on = immutability_policy_expires_on - self.immutability_policy_mode = immutability_policy_mode - self.legal_hold = legal_hold - - -class 
BlobTag(msrest.serialization.Model): - """BlobTag. - - All required parameters must be populated in order to send to Azure. - - :param key: Required. - :type key: str - :param value: Required. - :type value: str - """ - - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'key': {'key': 'Key', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - _xml_map = { - 'name': 'Tag' - } - - def __init__( - self, - *, - key: str, - value: str, - **kwargs - ): - super(BlobTag, self).__init__(**kwargs) - self.key = key - self.value = value - - -class BlobTags(msrest.serialization.Model): - """Blob tags. - - All required parameters must be populated in order to send to Azure. - - :param blob_tag_set: Required. - :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] - """ - - _validation = { - 'blob_tag_set': {'required': True}, - } - - _attribute_map = { - 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, - } - _xml_map = { - 'name': 'Tags' - } - - def __init__( - self, - *, - blob_tag_set: List["BlobTag"], - **kwargs - ): - super(BlobTags, self).__init__(**kwargs) - self.blob_tag_set = blob_tag_set - - -class Block(msrest.serialization.Model): - """Represents a single block in a block blob. It describes the block's ID and size. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The base64 encoded block ID. - :type name: str - :param size: Required. The block size in bytes. - :type size: long - """ - - _validation = { - 'name': {'required': True}, - 'size': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'size': {'key': 'Size', 'type': 'long'}, - } - - def __init__( - self, - *, - name: str, - size: int, - **kwargs - ): - super(Block, self).__init__(**kwargs) - self.name = name - self.size = size - - -class BlockList(msrest.serialization.Model): - """BlockList. - - :param committed_blocks: - :type committed_blocks: list[~azure.storage.blob.models.Block] - :param uncommitted_blocks: - :type uncommitted_blocks: list[~azure.storage.blob.models.Block] - """ - - _attribute_map = { - 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, - } - - def __init__( - self, - *, - committed_blocks: Optional[List["Block"]] = None, - uncommitted_blocks: Optional[List["Block"]] = None, - **kwargs - ): - super(BlockList, self).__init__(**kwargs) - self.committed_blocks = committed_blocks - self.uncommitted_blocks = uncommitted_blocks - - -class BlockLookupList(msrest.serialization.Model): - """BlockLookupList. 
- - :param committed: - :type committed: list[str] - :param uncommitted: - :type uncommitted: list[str] - :param latest: - :type latest: list[str] - """ - - _attribute_map = { - 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, - 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, - 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, - } - _xml_map = { - 'name': 'BlockList' - } - - def __init__( - self, - *, - committed: Optional[List[str]] = None, - uncommitted: Optional[List[str]] = None, - latest: Optional[List[str]] = None, - **kwargs - ): - super(BlockLookupList, self).__init__(**kwargs) - self.committed = committed - self.uncommitted = uncommitted - self.latest = latest - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class ContainerCpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the - default encryption scope to set on the container and use for all future writes. - :type default_encryption_scope: str - :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, - prevents any request from specifying a different encryption scope than the scope set on the - container. - :type prevent_encryption_scope_override: bool - """ - - _attribute_map = { - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, - } - - def __init__( - self, - *, - default_encryption_scope: Optional[str] = None, - prevent_encryption_scope_override: Optional[bool] = None, - **kwargs - ): - super(ContainerCpkScopeInfo, self).__init__(**kwargs) - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - - -class ContainerItem(msrest.serialization.Model): - """An Azure Storage container. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a container. - :type properties: ~azure.storage.blob.models.ContainerProperties - :param metadata: Dictionary of :code:``. 
- :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Container' - } - - def __init__( - self, - *, - name: str, - properties: "ContainerProperties", - deleted: Optional[bool] = None, - version: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(ContainerItem, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class ContainerProperties(msrest.serialization.Model): - """Properties of a container. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param lease_status: Possible values include: "locked", "unlocked". - :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType - :param lease_state: Possible values include: "available", "leased", "expired", "breaking", - "broken". - :type lease_state: str or ~azure.storage.blob.models.LeaseStateType - :param lease_duration: Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType - :param public_access: Possible values include: "container", "blob". - :type public_access: str or ~azure.storage.blob.models.PublicAccessType - :param has_immutability_policy: - :type has_immutability_policy: bool - :param has_legal_hold: - :type has_legal_hold: bool - :param default_encryption_scope: - :type default_encryption_scope: str - :param prevent_encryption_scope_override: - :type prevent_encryption_scope_override: bool - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param is_immutable_storage_with_versioning_enabled: Indicates if version level worm is enabled - on this container. 
- :type is_immutable_storage_with_versioning_enabled: bool - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'public_access': {'key': 'PublicAccess', 'type': 'str'}, - 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, - 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, - 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, - 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'is_immutable_storage_with_versioning_enabled': {'key': 'ImmutableStorageWithVersioningEnabled', 'type': 'bool'}, - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - public_access: Optional[Union[str, "PublicAccessType"]] = None, - has_immutability_policy: Optional[bool] = None, - has_legal_hold: Optional[bool] = None, - default_encryption_scope: Optional[str] = None, - prevent_encryption_scope_override: Optional[bool] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - is_immutable_storage_with_versioning_enabled: Optional[bool] = None, - **kwargs - ): - super(ContainerProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.public_access = public_access - self.has_immutability_policy = has_immutability_policy - self.has_legal_hold = has_legal_hold - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = prevent_encryption_scope_override - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.is_immutable_storage_with_versioning_enabled = is_immutable_storage_with_versioning_enabled - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user age sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. 
The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - *, - allowed_origins: str, - allowed_methods: str, - allowed_headers: str, - exposed_headers: str, - max_age_in_seconds: int, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class CpkInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :type encryption_key: str - :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided - if the x-ms-encryption-key header is provided. - :type encryption_key_sha256: str - :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. Possible values include: "None", "AES256". - :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_key: Optional[str] = None, - encryption_key_sha256: Optional[str] = None, - encryption_algorithm: Optional[Union[str, "EncryptionAlgorithmType"]] = None, - **kwargs - ): - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = encryption_key - self.encryption_key_sha256 = encryption_key_sha256 - self.encryption_algorithm = encryption_algorithm - - -class CpkScopeInfo(msrest.serialization.Model): - """Parameter group. - - :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the - encryption scope to use to encrypt the data provided in the request. If not specified, - encryption is performed with the default account encryption scope. 
For more information, see - Encryption at Rest for Azure Storage Services. - :type encryption_scope: str - """ - - _attribute_map = { - 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_scope: Optional[str] = None, - **kwargs - ): - super(CpkScopeInfo, self).__init__(**kwargs) - self.encryption_scope = encryption_scope - - -class DelimitedTextConfiguration(msrest.serialization.Model): - """Groups the settings used for interpreting the blob data if the blob is delimited text formatted. - - :param column_separator: The string used to separate columns. - :type column_separator: str - :param field_quote: The string used to quote a specific field. - :type field_quote: str - :param record_separator: The string used to separate records. - :type record_separator: str - :param escape_char: The string used as an escape character. - :type escape_char: str - :param headers_present: Represents whether the data has headers. - :type headers_present: bool - """ - - _attribute_map = { - 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, - 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, - 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, - } - _xml_map = { - 'name': 'DelimitedTextConfiguration' - } - - def __init__( - self, - *, - column_separator: Optional[str] = None, - field_quote: Optional[str] = None, - record_separator: Optional[str] = None, - escape_char: Optional[str] = None, - headers_present: Optional[bool] = None, - **kwargs - ): - super(DelimitedTextConfiguration, self).__init__(**kwargs) - self.column_separator = column_separator - self.field_quote = field_quote - self.record_separator = record_separator - self.escape_char = escape_char - self.headers_present = headers_present - - -class FilterBlobItem(msrest.serialization.Model): - """Blob info from a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param container_name: Required. - :type container_name: str - :param tags: A set of tags. Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - """ - - _validation = { - 'name': {'required': True}, - 'container_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'container_name': {'key': 'ContainerName', 'type': 'str'}, - 'tags': {'key': 'Tags', 'type': 'BlobTags'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - container_name: str, - tags: Optional["BlobTags"] = None, - **kwargs - ): - super(FilterBlobItem, self).__init__(**kwargs) - self.name = name - self.container_name = container_name - self.tags = tags - - -class FilterBlobSegment(msrest.serialization.Model): - """The result of a Filter Blobs API call. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param where: Required. - :type where: str - :param blobs: Required. 
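# Illustrative sketch: populating the CpkInfo and CpkScopeInfo parameter groups
# defined above. The import path is an assumption that mirrors the deleted
# v2021_04_10 layout but points at the v2021_08_06 version this release keeps;
# the key bytes and scope name are placeholders.
import base64
import hashlib

from azure.multiapi.storagev2.blob.v2021_08_06._generated import models as _models

key_bytes = b"0" * 32  # placeholder 256-bit customer-provided key
cpk = _models.CpkInfo(
    encryption_key=base64.b64encode(key_bytes).decode(),
    encryption_key_sha256=base64.b64encode(hashlib.sha256(key_bytes).digest()).decode(),
    encryption_algorithm="AES256",  # currently the only accepted algorithm
)
scope = _models.CpkScopeInfo(encryption_scope="example-scope")  # hypothetical scope name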
- :type blobs: list[~azure.storage.blob.models.FilterBlobItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'where': {'required': True}, - 'blobs': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'where': {'key': 'Where', 'type': 'str'}, - 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - where: str, - blobs: List["FilterBlobItem"], - next_marker: Optional[str] = None, - **kwargs - ): - super(FilterBlobSegment, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.where = where - self.blobs = blobs - self.next_marker = next_marker - - -class GeoReplication(msrest.serialization.Model): - """Geo-Replication information for the Secondary Storage Service. - - All required parameters must be populated in order to send to Azure. - - :param status: Required. The status of the secondary location. Possible values include: "live", - "bootstrap", "unavailable". - :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes - preceding this value are guaranteed to be available for read operations at the secondary. - Primary writes after this point in time may or may not be available for reads. - :type last_sync_time: ~datetime.datetime - """ - - _validation = { - 'status': {'required': True}, - 'last_sync_time': {'required': True}, - } - - _attribute_map = { - 'status': {'key': 'Status', 'type': 'str'}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, - } - - def __init__( - self, - *, - status: Union[str, "GeoReplicationStatusType"], - last_sync_time: datetime.datetime, - **kwargs - ): - super(GeoReplication, self).__init__(**kwargs) - self.status = status - self.last_sync_time = last_sync_time - - -class JsonTextConfiguration(msrest.serialization.Model): - """json text configuration. - - :param record_separator: The string used to separate records. - :type record_separator: str - """ - - _attribute_map = { - 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, - } - _xml_map = { - 'name': 'JsonTextConfiguration' - } - - def __init__( - self, - *, - record_separator: Optional[str] = None, - **kwargs - ): - super(JsonTextConfiguration, self).__init__(**kwargs) - self.record_separator = record_separator - - -class KeyInfo(msrest.serialization.Model): - """Key information. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. The date-time the key is active in ISO 8601 UTC time. - :type start: str - :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. - :type expiry: str - """ - - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - } - - def __init__( - self, - *, - start: str, - expiry: str, - **kwargs - ): - super(KeyInfo, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. 
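# Illustrative sketch: KeyInfo carries the validity window for a user
# delegation key as ISO 8601 UTC strings. Import path is an assumption,
# mirroring the kept v2021_08_06 package layout.
import datetime

from azure.multiapi.storagev2.blob.v2021_08_06._generated import models as _models

_now = datetime.datetime.now(datetime.timezone.utc)
key_info = _models.KeyInfo(
    start=_now.strftime("%Y-%m-%dT%H:%M:%SZ"),
    expiry=(_now + datetime.timedelta(hours=1)).strftime("%Y-%m-%dT%H:%M:%SZ"),
)
# key_info is the request body a Get User Delegation Key call expects.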
- - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsFlatSegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: ~azure.storage.blob.models.BlobFlatListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobFlatListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobHierarchyListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - delimiter: Optional[str] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ListContainersSegmentResponse(msrest.serialization.Model): - """An enumeration of containers. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param container_items: Required. - :type container_items: list[~azure.storage.blob.models.ContainerItem] - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_items': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_items: List["ContainerItem"], - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListContainersSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.container_items = container_items - self.next_marker = next_marker - - -class Logging(msrest.serialization.Model): - """Azure Analytics Logging settings. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param delete: Required. Indicates whether all delete requests should be logged. - :type delete: bool - :param read: Required. 
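# Illustrative sketch: the enumeration responses above page via next_marker.
# "list_page" is a stand-in for whatever callable returns a
# ListContainersSegmentResponse for a given marker (for example the generated
# service operation); it is not defined in this file.
def iter_container_names(list_page):
    marker = None
    while True:
        page = list_page(marker)           # -> ListContainersSegmentResponse
        for item in page.container_items:  # ContainerItem instances
            yield item.name
        marker = page.next_marker
        if not marker:                     # an empty marker means the listing is complete
            break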
Indicates whether all read requests should be logged. - :type read: bool - :param write: Required. Indicates whether all write requests should be logged. - :type write: bool - :param retention_policy: Required. the retention policy which determines how long the - associated data should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'delete': {'required': True}, - 'read': {'required': True}, - 'write': {'required': True}, - 'retention_policy': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'delete': {'key': 'Delete', 'type': 'bool'}, - 'read': {'key': 'Read', 'type': 'bool'}, - 'write': {'key': 'Write', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - version: str, - delete: bool, - read: bool, - write: bool, - retention_policy: "RetentionPolicy", - **kwargs - ): - super(Logging, self).__init__(**kwargs) - self.version = version - self.delete = delete - self.read = read - self.write = write - self.retention_policy = retention_policy - - -class Metrics(msrest.serialization.Model): - """a summary of request statistics grouped by API in hour or minute aggregates for blobs. - - All required parameters must be populated in order to send to Azure. - - :param version: The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the Blob service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: the retention policy which determines how long the associated data - should persist. - :type retention_policy: ~azure.storage.blob.models.RetentionPolicy - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - enabled: bool, - version: Optional[str] = None, - include_apis: Optional[bool] = None, - retention_policy: Optional["RetentionPolicy"] = None, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. 
- :type if_tags: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - 'if_tags': {'key': 'ifTags', 'type': 'str'}, - } - - def __init__( - self, - *, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - if_tags: Optional[str] = None, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - self.if_tags = if_tags - - -class PageList(msrest.serialization.Model): - """the list of pages. - - :param page_range: - :type page_range: list[~azure.storage.blob.models.PageRange] - :param clear_range: - :type clear_range: list[~azure.storage.blob.models.ClearRange] - """ - - _attribute_map = { - 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, - 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, - } - - def __init__( - self, - *, - page_range: Optional[List["PageRange"]] = None, - clear_range: Optional[List["ClearRange"]] = None, - **kwargs - ): - super(PageList, self).__init__(**kwargs) - self.page_range = page_range - self.clear_range = clear_range - - -class PageRange(msrest.serialization.Model): - """PageRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'PageRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(PageRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class QueryFormat(msrest.serialization.Model): - """QueryFormat. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The quick query format type. Possible values include: "delimited", - "json", "arrow", "parquet". - :type type: str or ~azure.storage.blob.models.QueryFormatType - :param delimited_text_configuration: Groups the settings used for interpreting the blob data if - the blob is delimited text formatted. - :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration - :param json_text_configuration: json text configuration. - :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration - :param arrow_configuration: Groups the settings used for formatting the response if the - response should be Arrow formatted. - :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration - :param parquet_text_configuration: Any object. 
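# Illustrative sketch: the "Parameter group" models bundle the conditional
# request values that the operations further below expand into If-*/x-ms-*
# headers. Import path is an assumption, mirroring the kept v2021_08_06
# package layout; the lease id and ETag are placeholders.
import datetime

from azure.multiapi.storagev2.blob.v2021_08_06._generated import models as _models

lease = _models.LeaseAccessConditions(lease_id="00000000-0000-0000-0000-000000000000")
conditions = _models.ModifiedAccessConditions(
    if_unmodified_since=datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc),
    if_match='"0x8D4BCC2E4835CD0"',  # placeholder ETag value
)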
- :type parquet_text_configuration: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, - 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, - 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, - 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, - 'parquet_text_configuration': {'key': 'ParquetTextConfiguration', 'type': 'object'}, - } - - def __init__( - self, - *, - type: Union[str, "QueryFormatType"], - delimited_text_configuration: Optional["DelimitedTextConfiguration"] = None, - json_text_configuration: Optional["JsonTextConfiguration"] = None, - arrow_configuration: Optional["ArrowConfiguration"] = None, - parquet_text_configuration: Optional[Any] = None, - **kwargs - ): - super(QueryFormat, self).__init__(**kwargs) - self.type = type - self.delimited_text_configuration = delimited_text_configuration - self.json_text_configuration = json_text_configuration - self.arrow_configuration = arrow_configuration - self.parquet_text_configuration = parquet_text_configuration - - -class QueryRequest(msrest.serialization.Model): - """Groups the set of query request settings. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar query_type: Required. The type of the provided query expression. Has constant value: - "SQL". - :vartype query_type: str - :param expression: Required. The query expression in SQL. The maximum size of the query - expression is 256KiB. - :type expression: str - :param input_serialization: - :type input_serialization: ~azure.storage.blob.models.QuerySerialization - :param output_serialization: - :type output_serialization: ~azure.storage.blob.models.QuerySerialization - """ - - _validation = { - 'query_type': {'required': True, 'constant': True}, - 'expression': {'required': True}, - } - - _attribute_map = { - 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, - 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, - 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, - 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, - } - _xml_map = { - 'name': 'QueryRequest' - } - - query_type = "SQL" - - def __init__( - self, - *, - expression: str, - input_serialization: Optional["QuerySerialization"] = None, - output_serialization: Optional["QuerySerialization"] = None, - **kwargs - ): - super(QueryRequest, self).__init__(**kwargs) - self.expression = expression - self.input_serialization = input_serialization - self.output_serialization = output_serialization - - -class QuerySerialization(msrest.serialization.Model): - """QuerySerialization. - - All required parameters must be populated in order to send to Azure. - - :param format: Required. 
- :type format: ~azure.storage.blob.models.QueryFormat - """ - - _validation = { - 'format': {'required': True}, - } - - _attribute_map = { - 'format': {'key': 'Format', 'type': 'QueryFormat'}, - } - - def __init__( - self, - *, - format: "QueryFormat", - **kwargs - ): - super(QuerySerialization, self).__init__(**kwargs) - self.format = format - - -class RetentionPolicy(msrest.serialization.Model): - """the retention policy which determines how long the associated data should persist. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the storage - service. - :type enabled: bool - :param days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :type days: int - :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage - account. - :type allow_permanent_delete: bool - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, - } - - def __init__( - self, - *, - enabled: bool, - days: Optional[int] = None, - allow_permanent_delete: Optional[bool] = None, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - self.allow_permanent_delete = allow_permanent_delete - - -class SequenceNumberAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a - blob if it has a sequence number less than or equal to the specified. - :type if_sequence_number_less_than_or_equal_to: long - :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it - has a sequence number less than the specified. - :type if_sequence_number_less_than: long - :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it - has the specified sequence number. - :type if_sequence_number_equal_to: long - """ - - _attribute_map = { - 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, - 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, - 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, - } - - def __init__( - self, - *, - if_sequence_number_less_than_or_equal_to: Optional[int] = None, - if_sequence_number_less_than: Optional[int] = None, - if_sequence_number_equal_to: Optional[int] = None, - **kwargs - ): - super(SequenceNumberAccessConditions, self).__init__(**kwargs) - self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to - self.if_sequence_number_less_than = if_sequence_number_less_than - self.if_sequence_number_equal_to = if_sequence_number_equal_to - - -class SignedIdentifier(msrest.serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. a unique id. - :type id: str - :param access_policy: An Access policy. 
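# Illustrative sketch: wiring the query models together into the request body
# for a blob "quick query". Import path is an assumption, mirroring the kept
# v2021_08_06 package layout.
from azure.multiapi.storagev2.blob.v2021_08_06._generated import models as _models

input_serialization = _models.QuerySerialization(
    format=_models.QueryFormat(
        type="delimited",
        delimited_text_configuration=_models.DelimitedTextConfiguration(
            column_separator=",",
            record_separator="\n",
            headers_present=True,
        ),
    )
)
output_serialization = _models.QuerySerialization(
    format=_models.QueryFormat(
        type="json",
        json_text_configuration=_models.JsonTextConfiguration(record_separator="\n"),
    )
)
query = _models.QueryRequest(
    expression="SELECT * FROM BlobStorage",  # query_type is the constant "SQL"
    input_serialization=input_serialization,
    output_serialization=output_serialization,
)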
- :type access_policy: ~azure.storage.blob.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - _xml_map = { - 'name': 'SignedIdentifier' - } - - def __init__( - self, - *, - id: str, - access_policy: Optional["AccessPolicy"] = None, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a - matching value. - :type source_if_tags: str - """ - - _attribute_map = { - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, - } - - def __init__( - self, - *, - source_if_modified_since: Optional[datetime.datetime] = None, - source_if_unmodified_since: Optional[datetime.datetime] = None, - source_if_match: Optional[str] = None, - source_if_none_match: Optional[str] = None, - source_if_tags: Optional[str] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_tags = source_if_tags - - -class StaticWebsite(msrest.serialization.Model): - """The properties that enable an account to host a static website. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether this account is hosting a static website. - :type enabled: bool - :param index_document: The default name of the index page under each directory. - :type index_document: str - :param error_document404_path: The absolute path of the custom 404 page. - :type error_document404_path: str - :param default_index_document_path: Absolute path of the default index page. 
- :type default_index_document_path: str - """ - - _validation = { - 'enabled': {'required': True}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'index_document': {'key': 'IndexDocument', 'type': 'str'}, - 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, - 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, - } - - def __init__( - self, - *, - enabled: bool, - index_document: Optional[str] = None, - error_document404_path: Optional[str] = None, - default_index_document_path: Optional[str] = None, - **kwargs - ): - super(StaticWebsite, self).__init__(**kwargs) - self.enabled = enabled - self.index_document = index_document - self.error_document404_path = error_document404_path - self.default_index_document_path = default_index_document_path - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - message: Optional[str] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage Service Properties. - - :param logging: Azure Analytics Logging settings. - :type logging: ~azure.storage.blob.models.Logging - :param hour_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in hour or minute - aggregates for blobs. - :type minute_metrics: ~azure.storage.blob.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.blob.models.CorsRule] - :param default_service_version: The default version to use for requests to the Blob service if - an incoming request's version is not specified. Possible values include version 2008-10-27 and - all more recent versions. - :type default_service_version: str - :param delete_retention_policy: the retention policy which determines how long the associated - data should persist. - :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy - :param static_website: The properties that enable an account to host a static website. 
- :type static_website: ~azure.storage.blob.models.StaticWebsite - """ - - _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging'}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, - 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, - 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, - } - - def __init__( - self, - *, - logging: Optional["Logging"] = None, - hour_metrics: Optional["Metrics"] = None, - minute_metrics: Optional["Metrics"] = None, - cors: Optional[List["CorsRule"]] = None, - default_service_version: Optional[str] = None, - delete_retention_policy: Optional["RetentionPolicy"] = None, - static_website: Optional["StaticWebsite"] = None, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.logging = logging - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.default_service_version = default_service_version - self.delete_retention_policy = delete_retention_policy - self.static_website = static_website - - -class StorageServiceStats(msrest.serialization.Model): - """Stats for the storage service. - - :param geo_replication: Geo-Replication information for the Secondary Storage Service. - :type geo_replication: ~azure.storage.blob.models.GeoReplication - """ - - _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, - } - - def __init__( - self, - *, - geo_replication: Optional["GeoReplication"] = None, - **kwargs - ): - super(StorageServiceStats, self).__init__(**kwargs) - self.geo_replication = geo_replication - - -class UserDelegationKey(msrest.serialization.Model): - """A user delegation key. - - All required parameters must be populated in order to send to Azure. - - :param signed_oid: Required. The Azure Active Directory object ID in GUID format. - :type signed_oid: str - :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. - :type signed_tid: str - :param signed_start: Required. The date-time the key is active. - :type signed_start: ~datetime.datetime - :param signed_expiry: Required. The date-time the key expires. - :type signed_expiry: ~datetime.datetime - :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the - key. - :type signed_service: str - :param signed_version: Required. The service version that created the key. - :type signed_version: str - :param value: Required. The key as a base64 string. 
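# Illustrative sketch: assembling StorageServiceProperties from the analytics,
# CORS and static-website models above, as a Set Service Properties call would
# serialize it. Import path is an assumption, mirroring the kept v2021_08_06
# package layout; the origin, header filters and paths are placeholders.
from azure.multiapi.storagev2.blob.v2021_08_06._generated import models as _models

retention = _models.RetentionPolicy(enabled=True, days=7)
properties = _models.StorageServiceProperties(
    logging=_models.Logging(
        version="1.0", delete=True, read=False, write=True, retention_policy=retention
    ),
    hour_metrics=_models.Metrics(
        enabled=True, version="1.0", include_apis=True, retention_policy=retention
    ),
    cors=[
        _models.CorsRule(
            allowed_origins="https://contoso.example",
            allowed_methods="GET,PUT",
            allowed_headers="x-ms-meta-*",
            exposed_headers="x-ms-meta-*",
            max_age_in_seconds=3600,
        )
    ],
    delete_retention_policy=_models.RetentionPolicy(enabled=True, days=14),
    static_website=_models.StaticWebsite(
        enabled=True, index_document="index.html", error_document404_path="404.html"
    ),
)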
- :type value: str - """ - - _validation = { - 'signed_oid': {'required': True}, - 'signed_tid': {'required': True}, - 'signed_start': {'required': True}, - 'signed_expiry': {'required': True}, - 'signed_service': {'required': True}, - 'signed_version': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, - 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, - 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, - 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, - 'signed_service': {'key': 'SignedService', 'type': 'str'}, - 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, - 'value': {'key': 'Value', 'type': 'str'}, - } - - def __init__( - self, - *, - signed_oid: str, - signed_tid: str, - signed_start: datetime.datetime, - signed_expiry: datetime.datetime, - signed_service: str, - signed_version: str, - value: str, - **kwargs - ): - super(UserDelegationKey, self).__init__(**kwargs) - self.signed_oid = signed_oid - self.signed_tid = signed_tid - self.signed_start = signed_start - self.signed_expiry = signed_expiry - self.signed_service = signed_service - self.signed_version = signed_version - self.value = value diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/__init__.py deleted file mode 100644 index 902269d..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._container_operations import ContainerOperations -from ._blob_operations import BlobOperations -from ._page_blob_operations import PageBlobOperations -from ._append_blob_operations import AppendBlobOperations -from ._block_blob_operations import BlockBlobOperations - -__all__ = [ - 'ServiceOperations', - 'ContainerOperations', - 'BlobOperations', - 'PageBlobOperations', - 'AppendBlobOperations', - 'BlockBlobOperations', -] diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_append_blob_operations.py deleted file mode 100644 index b38af4b..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_append_blob_operations.py +++ /dev/null @@ -1,734 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class AppendBlobOperations(object): - """AppendBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - content_length, # type: int - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create Append Blob operation creates a new append blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. 
Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "AppendBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - 
header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def append_block( - self, - content_length, # type: int - body, # type: IO - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Append Block operation commits a new block of data to the end of an existing append blob. - The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to - AppendBlob. Append Block is supported only on version 2015-02-21 version or later. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _max_size = None - _append_position = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "appendblock" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.append_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) 
- response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def append_block_from_url( - self, - source_url, # type: str - content_length, # type: int - source_range=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - copy_source_authorization=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Append Block operation commits a new block of data to the end of an existing append blob - where the contents are read from a source url. The Append Block operation is permitted only if - the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on - version 2015-02-21 version or later. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param content_length: The length of the request. - :type content_length: long - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. 
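# Illustrative sketch only: appending a byte range read from a source URL through the
# public azure-storage-blob client. The source URL would normally carry a SAS token or
# otherwise be readable by the service; all names and values here are assumptions.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="logs", blob_name="merged.log")
blob.create_append_blob()
blob.append_block_from_url(
    "https://<account>.blob.core.windows.net/logs/app.log?<sas>",
    source_offset=0, source_length=512)   # appends the first 512 bytes of the source blob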
- :type transactional_content_md5: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _max_size = None - _append_position = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if append_position_access_conditions is not None: - _max_size = append_position_access_conditions.max_size - _append_position = append_position_access_conditions.append_position - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "appendblock" - accept = "application/xml" - - # Construct URL - url = self.append_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': 
self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _max_size is not None: - header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = 
self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def seal( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on - version 2019-12-12 version or later. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param append_position_access_conditions: Parameter group. - :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _append_position = None - if append_position_access_conditions is not None: - _append_position = append_position_access_conditions.append_position - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - comp = "seal" - accept = "application/xml" - - # Construct URL - url = self.seal.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _append_position is not None: - header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_blob_operations.py deleted file mode 100644 index e041221..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_blob_operations.py +++ /dev/null @@ -1,3036 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BlobOperations(object): - """BlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def download( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - range_get_content_md5=None, # type: Optional[bool] - range_get_content_crc64=None, # type: Optional[bool] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Download operation reads or downloads a blob from the system, including its metadata and - properties. You can also call Download to read a snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param range_get_content_md5: When set to true and specified together with the Range, the - service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param range_get_content_crc64: When set to true and specified together with the Range, the - service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 - MB in size. - :type range_get_content_crc64: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if range_get_content_crc64 is not None: - header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', 
response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', 
response.headers.get('x-ms-legal-hold')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_properties( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. 
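# Illustrative sketch only: reading the same blob properties through the public
# azure-storage-blob client; connection string and names are assumptions.
from azure.storage.blob import BlobClient

blob = BlobClient.from_connection_string(
    "<connection-string>", container_name="logs", blob_name="app.log")
props = blob.get_blob_properties()       # HEAD request; no blob content is returned
print(props.etag, props.last_modified, props.size, props.blob_type)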
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if 
request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) - response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) - response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) - response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) - response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) - response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) - response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) - response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) - response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def delete( - self, - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] - request_id_parameter=None, # type: Optional[str] - blob_delete_type="Permanent", # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is - permanently removed from the storage account. 
If the storage account's soft delete feature is - enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible - immediately. However, the blob service retains the blob or snapshot for the number of days - specified by the DeleteRetentionPolicy section of [Storage service properties] - (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's - data is permanently removed from the storage account. Note that you continue to be charged for - the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and - specify the "include=deleted" query parameter to discover which blobs and snapshots have been - soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other - operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code - of 404 (ResourceNotFound). - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the - following two options: include: Delete the base blob and all of its snapshots. only: Delete - only the blob's snapshots and not the blob itself. - :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to - permanently delete a blob if blob soft delete is enabled. - :type blob_delete_type: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
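The soft-delete behaviour described above is driven entirely by query parameters and headers on a single DELETE request. As a minimal sketch of the equivalent wire-level call (account URL, SAS token, and service version are placeholder assumptions; the generated method expects a 202 response)::

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # assumed
    sas = "sv=...&sig=..."  # assumed SAS token with delete permission

    resp = requests.delete(
        f"{blob_url}?{sas}",  # append "&deletetype=permanent" to bypass soft delete
        headers={
            "x-ms-version": "2021-08-06",        # example service version
            "x-ms-delete-snapshots": "include",  # or "only" to delete just the snapshots
        },
    )
    assert resp.status_code == 202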
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if blob_delete_type is not None: - query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: 
- map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def undelete( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Undelete a blob that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
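The undelete operation defined here reduces to a bodiless PUT with the comp=undelete query parameter; a minimal sketch under the same placeholder assumptions (account URL, SAS token, service version)::

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # assumed
    sas = "sv=...&sig=..."  # assumed SAS token

    resp = requests.put(
        f"{blob_url}?comp=undelete&{sas}",
        headers={"x-ms-version": "2021-08-06"},  # example service version
    )
    resp.raise_for_status()  # the service answers 200 once the soft-deleted blob is restored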
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_expiry( - self, - expiry_options, # type: Union[str, "_models.BlobExpiryOptions"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - expires_on=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. - :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/xml" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_http_headers( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set HTTP Headers operation sets system properties on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
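The set_expiry method above carries both the expiry mode and the expiry time purely in headers on a PUT with comp=expiry. A rough sketch, assuming an absolute expiry mode and placeholder URL, SAS token, and version (the "Absolute" option value is an assumption taken from the BlobExpiryOptions enum, not shown in this excerpt)::

    import requests
    from datetime import datetime, timedelta, timezone
    from email.utils import format_datetime

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # assumed
    sas = "sv=...&sig=..."  # assumed SAS token

    expires = format_datetime(datetime.now(timezone.utc) + timedelta(days=7), usegmt=True)
    resp = requests.put(
        f"{blob_url}?comp=expiry&{sas}",
        headers={
            "x-ms-version": "2021-08-06",      # example service version
            "x-ms-expiry-option": "Absolute",  # assumed expiry mode
            "x-ms-expiry-time": expires,       # RFC-1123 timestamp for the absolute mode
        },
    )
    resp.raise_for_status()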
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_md5 = None - _blob_content_encoding = None - _blob_content_language = None - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _blob_content_disposition = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_disposition = blob_http_headers.blob_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_immutability_policy( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Immutability Policy operation sets the immutability policy on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. 
- :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "immutabilityPolicies" - accept = "application/xml" - - # Construct URL - url = self.set_immutability_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) - response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', 
response.headers.get('x-ms-immutability-policy-mode')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def delete_immutability_policy( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Delete Immutability Policy operation deletes the immutability policy on the blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "immutabilityPolicies" - accept = "application/xml" - - # Construct URL - url = self.delete_immutability_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_legal_hold( - self, - legal_hold, # type: bool - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs 
# type: Any - ): - # type: (...) -> None - """The Set Legal Hold operation sets a legal hold on the blob. - - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "legalhold" - accept = "application/xml" - - # Construct URL - url = self.set_legal_hold.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_legal_hold.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: 
Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or - more name-value pairs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - 
query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - 
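The generated set_metadata method serializes the metadata mapping under the x-ms-meta header prefix; on the wire each pair travels as its own x-ms-meta-<name> header on a PUT with comp=metadata. A sketch under the usual placeholder assumptions (URL, SAS token, version, metadata values)::

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # assumed
    sas = "sv=...&sig=..."  # assumed SAS token

    metadata = {"project": "demo", "owner": "team-a"}    # hypothetical metadata
    headers = {"x-ms-version": "2021-08-06"}             # example service version
    headers.update({f"x-ms-meta-{name}": value for name, value in metadata.items()})

    resp = requests.put(f"{blob_url}?comp=metadata&{sas}", headers=headers)
    resp.raise_for_status()  # replaces any existing user metadata on the blob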
response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
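The lease duration and proposed lease ID described above are sent as headers on a PUT with comp=lease and x-ms-lease-action set to acquire; the lease ID comes back in the x-ms-lease-id response header on 201. A minimal sketch with placeholder URL, SAS token, and version::

    import uuid

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # assumed
    sas = "sv=...&sig=..."  # assumed SAS token

    resp = requests.put(
        f"{blob_url}?comp=lease&{sas}",
        headers={
            "x-ms-version": "2021-08-06",                  # example service version
            "x-ms-lease-action": "acquire",
            "x-ms-lease-duration": "-1",                   # infinite; or 15-60 seconds
            "x-ms-proposed-lease-id": str(uuid.uuid4()),   # optional client-chosen GUID
        },
    )
    lease_id = resp.headers["x-ms-lease-id"]  # returned with the 201 response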
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - 
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - proposed_lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
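Changing a lease, as described above, supplies both the current lease ID and the proposed replacement on the same comp=lease PUT; the new ID is echoed back in x-ms-lease-id. A sketch under the same placeholder assumptions::

    import uuid

    import requests

    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob"  # assumed
    sas = "sv=...&sig=..."  # assumed SAS token
    current_lease_id = "..."  # ID of the active lease

    resp = requests.put(
        f"{blob_url}?comp=lease&{sas}",
        headers={
            "x-ms-version": "2021-08-06",  # example service version
            "x-ms-lease-action": "change",
            "x-ms-lease-id": current_lease_id,
            "x-ms-proposed-lease-id": str(uuid.uuid4()),  # the replacement GUID
        },
    )
    new_lease_id = resp.headers["x-ms-lease-id"]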
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def create_snapshot( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create Snapshot operation creates a read-only snapshot of a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
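Changing and breaking a lease follow the same pattern through the lease client returned by ``acquire_lease``. A sketch with a hypothetical connection string and a caller-proposed GUID::

    import uuid
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="my-blob.txt"
    )

    lease = blob.acquire_lease(lease_duration=60)
    # change: swap the active lease ID for a caller-proposed GUID.
    lease.change(proposed_lease_id=str(uuid.uuid4()))
    # break: end the lease after at most 10 more seconds.
    lease.break_lease(lease_break_period=10)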
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if 
_lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def start_copy_from_url( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - seal_blob=None, # type: Optional[bool] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Start Copy From URL operation copies a blob or an internet resource to a new blob. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param seal_blob: Overrides the sealed state of the destination blob. Service version - 2019-12-12 and newer. - :type seal_blob: bool - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
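Snapshots are usually taken through ``BlobClient.create_snapshot``, which sits on top of the generated operation above. A sketch with placeholder names; the returned ``snapshot`` token addresses the read-only copy afterwards::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="my-blob.txt"
    )

    # Returns a dict-like result; 'snapshot' is the opaque DateTime token.
    props = blob.create_snapshot(metadata={"source": "nightly-backup"})

    snapshot_client = BlobClient.from_connection_string(
        "<connection-string>",
        container_name="my-container",
        blob_name="my-blob.txt",
        snapshot=props["snapshot"],
    )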
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - accept = "application/xml" - - # Construct URL - url = self.start_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if seal_blob is not None: - header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def copy_from_url( - self, - copy_source, # type: 
str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - copy_source_authorization=None, # type: Optional[str] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not - return a response until the copy is complete. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. 
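The asynchronous copy started here is normally kicked off from ``BlobClient.start_copy_from_url`` and polled through the blob properties. A sketch assuming the source URL is either public or carries a SAS::

    import time
    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="copied-blob.txt"
    )

    # Server-side, asynchronous copy; the service hands back copy id and status.
    dest.start_copy_from_url(
        "https://<source-account>.blob.core.windows.net/src/source-blob.txt?<sas>"
    )

    # Poll until the pending copy finishes (or fails).
    while dest.get_blob_properties().copy.status == "pending":
        time.sleep(1)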
- :type copy_source_authorization: str - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _lease_id = None - _encryption_scope = None - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - x_ms_requires_sync = "true" - accept = "application/xml" - - # Construct URL - url = self.copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - 
header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def abort_copy_from_url( - self, - copy_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a - destination blob with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - Blob operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
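The synchronous Copy From URL path (note the constant ``x-ms-requires-sync: true`` header above) does not return until the copy is complete. Through the public client it is reachable, in recent versions, via the ``requires_sync`` keyword; a sketch with placeholder URLs::

    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="copied-blob.txt"
    )

    # requires_sync=True routes to the one-shot Copy From URL operation, so the
    # copy has either succeeded or raised by the time this call returns.
    dest.start_copy_from_url(
        "https://<source-account>.blob.core.windows.net/src/source-blob.txt?<sas>",
        requires_sync=True,
    )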
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_tier( - self, - tier, # type: Union[str, "_models.AccessTierRequired"] - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - timeout=None, # type: Optional[int] - rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - 
**kwargs # type: Any - ): - # type: (...) -> None - """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a - premium storage account and on a block blob in a blob storage account (locally redundant - storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of - the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not - update the blob's ETag. - - :param tier: Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierRequired - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived - blob. - :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
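Aborting a pending copy needs the copy ID handed back when the copy was started; ``BlobClient.abort_copy`` wraps the operation above. A sketch with placeholder names::

    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="copied-blob.txt"
    )

    copy = dest.start_copy_from_url("https://<source>/large-blob?<sas>")
    # Aborting leaves a zero-length destination blob with full metadata.
    dest.abort_copy(copy["copy_id"])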
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tier" - accept = "application/xml" - - # Construct URL - url = self.set_tier.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if rehydrate_priority is not None: - header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if response.status_code == 202: - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_account_info( - self, - **kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def query( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - query_request=None, # type: Optional["_models.QueryRequest"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Query operation enables users to select/project on blob data by providing simple query - expressions. 
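Tier changes and the account-info probe map onto ``set_standard_blob_tier`` and ``get_account_information`` on the public client. A sketch with placeholder names, passing the tier as a plain string::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="my-container", blob_name="my-blob.txt"
    )

    # Moves a block blob between Hot/Cool/Archive; the blob's ETag is unchanged.
    blob.set_standard_blob_tier("Cool")

    # Surfaces the x-ms-sku-name / x-ms-account-kind headers returned above.
    info = blob.get_account_information()
    print(info["sku_name"], info["account_kind"])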
- - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param query_request: the query request. - :type query_request: ~azure.storage.blob.models.QueryRequest - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "query" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.query.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if query_request is not None: - body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) - 
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-blob-type']=self._deserialize('str', 
response.headers.get('x-ms-blob-type')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_tags( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - snapshot=None, # type: Optional[str] - version_id=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.BlobTags" - """The Get Tags operation enables users to get the tags associated with a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlobTags, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlobTags - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - accept = "application/xml" - - # Construct URL - url = self.get_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
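For reference, the Get Tags operation being deleted here is exposed by the retained versions as ``BlobClient.get_blob_tags``. A short hedged sketch (placeholder blob URL with a SAS token)::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient

    blob = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/data.csv?<sas>")  # placeholder

    # get_blob_tags drives the generated get_tags operation shown in this diff;
    # the result is a plain str-to-str mapping, e.g. {"project": "alpha"}.
    tags = blob.get_blob_tags()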
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlobTags', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def set_tags( - self, - timeout=None, # type: Optional[int] - version_id=None, # type: Optional[str] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - tags=None, # type: Optional["_models.BlobTags"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Set Tags operation enables users to set tags on a blob. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param version_id: The version id parameter is an opaque DateTime value that, when present, - specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. - :type version_id: str - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param tags: Blob tags. - :type tags: ~azure.storage.blob.models.BlobTags - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_tags = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "tags" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_tags.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if version_id is not None: - query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - if tags is not None: - body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
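The companion Set Tags operation maps to ``BlobClient.set_blob_tags`` in the retained versions. A minimal sketch under the same placeholder assumptions::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient

    blob = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/data.csv?<sas>")  # placeholder

    # set_blob_tags drives the generated set_tags operation; the mapping is flat
    # str-to-str and replaces any tag set currently on the blob.
    blob.set_blob_tags({"project": "alpha", "reviewed": "true"})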
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_block_blob_operations.py deleted file mode 100644 index 3cbe55e..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_block_blob_operations.py +++ /dev/null @@ -1,1148 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class BlockBlobOperations(object): - """BlockBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
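As the class docstring above notes, ``BlockBlobOperations`` is not meant to be instantiated directly; callers construct a public ``BlobClient``, which creates the generated client and invokes these operations as attributes. A hedged sketch of the block blob upload path that ends up in the ``upload`` operation defined below (placeholder URL and data)::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient

    blob = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/report.bin?<sas>")  # placeholder

    # upload_blob with the default BlockBlob type reaches the generated
    # BlockBlobOperations.upload; overwrite=True replaces the existing blob,
    # including its metadata, as the operation docstring describes.
    blob.upload_blob(b"hello block blob", overwrite=True, metadata={"source": "readme"})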
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def upload( - self, - content_length, # type: int - body, # type: IO - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Block Blob operation updates the content of an existing block blob. Updating an - existing block blob overwrites any existing metadata on the blob. Partial updates are not - supported with Put Blob; the content of the existing blob is overwritten with the content of - the new blob. To perform a partial update of the content of a block blob, use the Put Block - List operation. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. 
- :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "BlockBlob" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - 
header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not 
None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def put_blob_from_url( - self, - content_length, # type: int - copy_source, # type: str - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - blob_tags_string=None, # type: Optional[str] - copy_source_blob_properties=None, # type: Optional[bool] - copy_source_authorization=None, # type: Optional[str] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) 
-> None - """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are - read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial - updates are not supported with Put Blob from URL; the content of an existing blob is - overwritten with the content of the new blob. To perform partial updates to a block blob’s - contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. - - :param content_length: The length of the request. - :type content_length: long - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param copy_source_blob_properties: Optional, default is true. Indicates if properties from - the source blob should be copied. - :type copy_source_blob_properties: bool - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_tags = source_modified_access_conditions.source_if_tags - blob_type = "BlockBlob" - accept = "application/xml" - - # Construct URL - url = self.put_blob_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - 
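The Put Blob from URL operation above is whole-blob: the destination is overwritten in a single call. For the staged alternative its docstring mentions (Put Block from URL in conjunction with Put Block List), the public client pairs ``stage_block_from_url`` with ``commit_block_list``. A hedged sketch, assuming the retained namespace exposes the same helpers as ``azure.storage.blob`` (URLs, block id, and sizes are placeholders)::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobBlock, BlobClient

    src = "https://otheraccount.blob.core.windows.net/src/big.bin?<sas>"  # placeholder
    dst = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/dst/big.bin?<sas>")      # placeholder

    # One-shot: create the destination block blob from the source URL.
    dst.upload_blob_from_url(src, overwrite=True)

    # Staged variant: copy a byte range from the source into one block, then
    # commit the block list to assemble the blob.
    dst.stage_block_from_url(
        "YmxvY2stMDAx", src, source_offset=0, source_length=4 * 1024 * 1024)
    dst.commit_block_list([BlobBlock(block_id="YmxvY2stMDAx")])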
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_tags is not None: - header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if copy_source_blob_properties is not None: - header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def stage_block( - self, - block_id, # type: str - content_length, # type: int - body, # type: IO - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - **kwargs # 
type: Any - ): - # type: (...) -> None - """The Stage Block operation creates a new block to be committed as part of a blob. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "block" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.stage_block.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", 
transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def stage_block_from_url( - self, - block_id, # type: str - content_length, # type: int - source_url, # type: str - source_range=None, # type: Optional[str] - source_content_md5=None, # type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: 
Optional[str] - copy_source_authorization=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Stage Block operation creates a new block to be committed as part of a blob where the - contents are read from a URL. - - :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the - string must be less than or equal to 64 bytes in size. For a given blob, the length of the - value specified for the blockid parameter must be the same size for each block. - :type block_id: str - :param content_length: The length of the request. - :type content_length: long - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "block" - accept = "application/xml" - - # Construct URL - url = self.stage_block_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", 
_encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def commit_block_list( - self, - blocks, # type: "_models.BlockLookupList" - timeout=None, # type: Optional[int] - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - metadata=None, # type: Optional[str] - tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: 
Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Commit Block List operation writes a blob by specifying the list of block IDs that make up - the blob. In order to be written as part of a blob, a block must have been successfully written - to the server in a prior Put Block operation. You can call Put Block List to update a blob by - uploading only those blocks that have changed, then committing the new and existing blocks - together. You can do this by specifying whether to commit a block from the committed block list - or from the uncommitted block list, or to commit the most recently uploaded version of the - block, whichever list it may belong to. - - :param blocks: Blob Blocks. - :type blocks: ~azure.storage.blob.models.BlockLookupList - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param tier: Optional. Indicates the tier to be set on the blob. - :type tier: str or ~azure.storage.blob.models.AccessTierOptional - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_cache_control = None - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.commit_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = 
self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - 
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_block_list( - self, - snapshot=None, # type: Optional[str] - list_type="committed", # type: Union[str, "_models.BlockListType"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.BlockList" - """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a - block blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param list_type: Specifies whether to return the list of committed blocks, the list of - uncommitted blocks, or both lists together. - :type list_type: str or ~azure.storage.blob.models.BlockListType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: BlockList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.BlockList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_tags = modified_access_conditions.if_tags - comp = "blocklist" - accept = "application/xml" - - # Construct URL - url = self.get_block_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', 
response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('BlockList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_container_operations.py deleted file mode 100644 index c9f8080..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_container_operations.py +++ /dev/null @@ -1,1770 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ContainerOperations(object): - """ContainerOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] - request_id_parameter=None, # type: Optional[str] - container_cpk_scope_info=None, # type: Optional["_models.ContainerCpkScopeInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """creates a new container under the specified account. If the container with the same name - already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_cpk_scope_info: Parameter group. - :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _default_encryption_scope = None - _prevent_encryption_scope_override = None - if container_cpk_scope_info is not None: - _default_encryption_scope = container_cpk_scope_info.default_encryption_scope - _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if _default_encryption_scope is not None: - header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') - if _prevent_encryption_scope_override is not None: - header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """returns all user-defined metadata and system properties for the specified container. The data - returned does not include the container's list of blobs. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) - 
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) - response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) - response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) - response_headers['x-ms-immutable-storage-with-versioning-enabled']=self._deserialize('bool', response.headers.get('x-ms-immutable-storage-with-versioning-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{containerName}'} # type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """operation marks the specified container for deletion. The container and any blobs contained - within it are later deleted during garbage collection. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 
'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{containerName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """operation sets one or more user-defined name-value pairs for the specified container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - restype = "container" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{containerName}'} # type: ignore - - def get_access_policy( - self, - timeout=None, # type: 
Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> List["_models.SignedIdentifier"] - """gets the permissions for the specified container. The permissions indicate whether container - data may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.blob.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "container" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - def set_access_policy( - self, - timeout=None, # type: Optional[int] - access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] - request_id_parameter=None, # type: Optional[str] - container_acl=None, # type: Optional[List["_models.SignedIdentifier"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """sets the permissions for the specified container. The permissions indicate whether blobs in a - container may be accessed publicly. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param access: Specifies whether data in the container may be accessed publicly and the level - of access. - :type access: str or ~azure.storage.blob.models.PublicAccessType - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param container_acl: the acls for the container. - :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - restype = "container" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if access is not None: - header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} - if container_acl is not None: - body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore - - def restore( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - deleted_container_name=None, # type: Optional[str] - deleted_container_version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Restores a previously-deleted container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of - the deleted container to restore. - :type deleted_container_name: str - :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the - version of the deleted container to restore. 
- :type deleted_container_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_container_name is not None: - header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') - if deleted_container_version is not None: - header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{containerName}'} # type: ignore - - def rename( - self, - source_container_name, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Renames an existing container. - - :param source_container_name: Required. Specifies the name of the container to rename. - :type source_container_name: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{containerName}'} # type: ignore - - def submit_batch( - self, - content_length, # type: int - multipart_content_type, # type: str - body, # type: IO - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> IO - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/{containerName}'} # type: ignore - - def filter_blobs( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - where=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.FilterBlobSegment" - """The Filter Blobs operation enables callers to list blobs in a container whose tags match a - given search expression. Filter blobs searches within the given container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000.
- :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/{containerName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. 
The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - 
release_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "renew" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - proposed_lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] establishes and manages a lock on a container for delete operations. The lock duration - can be 15 to 60 seconds, or can be infinite. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - comp = "lease" - restype = "container" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - 
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{containerName}'} # type: ignore - - def list_blob_flat_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsFlatSegmentResponse" - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. 
- :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsFlatSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_flat_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore - - def list_blob_hierarchy_segment( - self, - delimiter, # type: str - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsHierarchySegmentResponse" - """[Update] The List Blobs operation returns a list of the blobs under the specified container. - - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore - - def get_account_info( - self, - 
**kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_page_blob_operations.py deleted file mode 100644 index c953df2..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_page_blob_operations.py +++ /dev/null @@ -1,1437 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class PageBlobOperations(object): - """PageBlobOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - content_length, # type: int - blob_content_length, # type: int - timeout=None, # type: Optional[int] - tier=None, # type: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] - metadata=None, # type: Optional[str] - blob_sequence_number=0, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - blob_tags_string=None, # type: Optional[str] - immutability_policy_expiry=None, # type: Optional[datetime.datetime] - immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] - legal_hold=None, # type: Optional[bool] - blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Create operation creates a new page blob. - - :param content_length: The length of the request. - :type content_length: long - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param tier: Optional. Indicates the tier to be set on the page blob. - :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier - :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. - If no name-value pairs are specified, the operation will copy the metadata from the source blob - or file to the destination blob. If one or more name-value pairs are specified, the destination - blob is created with the specified metadata, and metadata is not copied from the source blob or - file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming - rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more - information. - :type metadata: str - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param blob_tags_string: Optional. Used to set blob tags in various blob operations. - :type blob_tags_string: str - :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy - is set to expire. - :type immutability_policy_expiry: ~datetime.datetime - :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. - :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode - :param legal_hold: Specified if a legal hold should be set on the blob. - :type legal_hold: bool - :param blob_http_headers: Parameter group. - :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _blob_content_type = None - _blob_content_encoding = None - _blob_content_language = None - _blob_content_md5 = None - _blob_cache_control = None - _lease_id = None - _blob_content_disposition = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if blob_http_headers is not None: - _blob_content_type = blob_http_headers.blob_content_type - _blob_content_encoding = blob_http_headers.blob_content_encoding - _blob_content_language = blob_http_headers.blob_content_language - _blob_content_md5 = blob_http_headers.blob_content_md5 - _blob_cache_control = blob_http_headers.blob_cache_control - _blob_content_disposition = blob_http_headers.blob_content_disposition - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = 
modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - blob_type = "PageBlob" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') - if _blob_content_type is not None: - header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') - if _blob_content_encoding is not None: - header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') - if _blob_content_language is not None: - header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') - if _blob_content_md5 is not None: - header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') - if _blob_cache_control is not None: - header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _blob_content_disposition is not None: - header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = 
self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if blob_tags_string is not None: - header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') - if immutability_policy_expiry is not None: - header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') - if immutability_policy_mode is not None: - header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') - if legal_hold is not None: - header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def upload_pages( - self, - content_length, # type: int - body, # type: IO - transactional_content_md5=None, # type: Optional[bytearray] - transactional_content_crc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - range=None, # type: 
Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Pages operation writes a range of pages to a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param body: Initial data. - :type body: IO - :param transactional_content_md5: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_md5: bytearray - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "update" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if transactional_content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - 
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def clear_pages( - self, - content_length, # type: int - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Clear Pages operation clears a set of pages from a page blob. - - :param content_length: The length of the request. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - comp = "page" - page_write = "clear" - accept = "application/xml" - - # Construct URL - url = self.clear_pages.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if 
_encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def upload_pages_from_url( - self, - source_url, # type: str - source_range, # type: str - content_length, # type: int - range, # type: str - source_content_md5=None, # 
type: Optional[bytearray] - source_contentcrc64=None, # type: Optional[bytearray] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - copy_source_authorization=None, # type: Optional[str] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Upload Pages operation writes a range of pages to a page blob where the contents are read - from a URL. - - :param source_url: Specify a URL to the copy source. - :type source_url: str - :param source_range: Bytes of source data in the specified range. The length of this range - should match the ContentLength header and x-ms-range/Range destination range header. - :type source_range: str - :param content_length: The length of the request. - :type content_length: long - :param range: The range of bytes to which the source range would be written. The range should - be 512 aligned and range-end is required. - :type range: str - :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read - from the copy source. - :type source_content_md5: bytearray - :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_contentcrc64: bytearray - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param sequence_number_access_conditions: Parameter group. - :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _lease_id = None - _if_sequence_number_less_than_or_equal_to = None - _if_sequence_number_less_than = None - _if_sequence_number_equal_to = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - if sequence_number_access_conditions is not None: - _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to - _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than - _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to - if source_modified_access_conditions is not None: - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - comp = "page" - page_write = "update" - accept = "application/xml" - - # Construct URL - url = self.upload_pages_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - if source_content_md5 is not None: - header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') - if source_contentcrc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_sequence_number_less_than_or_equal_to is not None: - header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') - if _if_sequence_number_less_than is not None: - header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') - if _if_sequence_number_equal_to is not None: - header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if copy_source_authorization is not None: - 
header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_page_ranges( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.PageList" - """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot - of a page blob. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def get_page_ranges_diff( - self, - snapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - prevsnapshot=None, # type: Optional[str] - prev_snapshot_url=None, # type: Optional[str] - range=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.PageList" - """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that - were changed between target blob and previous snapshot. - - :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the blob snapshot to retrieve. For more information on working with blob snapshots, - see :code:`Creating - a Snapshot of a Blob.`. - :type snapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a - DateTime value that specifies that the response will contain only pages that were changed - between target blob and previous snapshot. Changed pages include both updated and cleared - pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is - the older of the two. Note that incremental snapshots are currently supported only for blobs - created on or after January 1, 2016. - :type prevsnapshot: str - :param prev_snapshot_url: Optional. This header is only supported in service versions - 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The - response will only contain pages that were changed between the target blob and its previous - snapshot. - :type prev_snapshot_url: str - :param range: Return only the bytes of the blob in the specified range. - :type range: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PageList, or the result of cls(response) - :rtype: ~azure.storage.blob.models.PageList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "pagelist" - accept = "application/xml" - - # Construct URL - url = self.get_page_ranges_diff.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if snapshot is not None: - query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if prevsnapshot is not None: - query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if prev_snapshot_url is not None: - header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('PageList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def resize( - self, - blob_content_length, # type: int - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Resize the Blob. - - :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 - TB. The page blob size must be aligned to a 512-byte boundary. - :type blob_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param cpk_info: Parameter group. - :type cpk_info: ~azure.storage.blob.models.CpkInfo - :param cpk_scope_info: Parameter group. - :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - _encryption_algorithm = None - _encryption_scope = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - _encryption_algorithm = cpk_info.encryption_algorithm - if cpk_scope_info is not None: - _encryption_scope = cpk_scope_info.encryption_scope - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.resize.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _encryption_key is not None: - header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') - if _encryption_key_sha256 is not None: - header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') - if _encryption_algorithm is not None: - header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') - if _encryption_scope is not None: - header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
- header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def update_sequence_number( - self, - sequence_number_action, # type: Union[str, "_models.SequenceNumberActionType"] - timeout=None, # type: Optional[int] - blob_sequence_number=0, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Update the sequence number of the blob. - - :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the - request. This property applies to page blobs only. This property indicates how the service - should modify the blob's sequence number. - :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled - value that you can use to track requests. The value of the sequence number must be between 0 - and 2^63 - 1. - :type blob_sequence_number: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.update_sequence_number.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') - if blob_sequence_number is not None: - header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore - - def copy_incremental( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Copy Incremental operation copies a snapshot of the source page blob to a destination page - blob. The snapshot is copied such that only the differential changes between the previously - copied snapshot are transferred to the destination. The copied snapshots are complete copies of - the original snapshot and can be read or copied from as usual. This API is supported since REST - version 2016-05-31. - - :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of - up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it - would appear in a request URI. The source blob must either be public or must be authenticated - via a shared access signature. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - _if_match = None - _if_none_match = None - _if_tags = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_tags = modified_access_conditions.if_tags - comp = "incrementalcopy" - accept = "application/xml" - - # Construct URL - url = self.copy_incremental.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_tags is not None: - header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_service_operations.py deleted file mode 100644 index 6362841..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_generated/operations/_service_operations.py +++ /dev/null @@ -1,710 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.blob.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def set_properties( - self, - storage_service_properties, # type: "_models.StorageServiceProperties" - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for a storage account's Blob service endpoint, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceProperties" - """gets the properties of a storage account's Blob service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - def get_statistics( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceStats" - """Retrieves statistics related to replication for the Blob service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats, or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('StorageServiceStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/'} # type: ignore - - def list_containers_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> "_models.ListContainersSegmentResponse" - """The List Containers Segment operation returns a list of the containers under the specified - account. - - :param prefix: Filters the results to return only containers whose name begins with the - specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's metadata be returned as - part of the response body. - :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_containers_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_containers_segment.metadata = {'url': '/'} # type: ignore - - def get_user_delegation_key( - self, - key_info, # type: "_models.KeyInfo" - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.UserDelegationKey" - """Retrieves a user delegation key for the Blob service. This is only a valid operation when using - bearer token authentication. - - :param key_info: Key information. 
- :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey, or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "userdelegationkey" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.get_user_delegation_key.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('UserDelegationKey', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_user_delegation_key.metadata = {'url': '/'} # 
type: ignore - - def get_account_info( - self, - **kwargs # type: Any - ): - # type: (...) -> None - """Returns the sku name and account kind. - - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "account" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_account_info.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) - response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) - response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_account_info.metadata = {'url': '/'} # type: ignore - - def submit_batch( - self, - content_length, # type: int - multipart_content_type, # type: str - body, # type: IO - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> IO - """The Batch operation allows multiple API calls to be embedded into a single HTTP request. - - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must be multipart/mixed with - a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. - :type multipart_content_type: str - :param body: Initial data. - :type body: IO - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "batch" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.submit_batch.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(body, 'IO', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - submit_batch.metadata = {'url': '/'} # type: ignore - - def filter_blobs( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - where=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: 
Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.FilterBlobSegment" - """The Filter Blobs operation enables callers to list blobs across all containers whose tags match - a given search expression. Filter blobs searches across all containers within a storage - account but can be scoped within the expression to a single container. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param where: Filters the results to return only to return only blobs whose tags match the - specified expression. - :type where: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to return. If the request does - not specify maxresults, or specifies a value greater than 5000, the server will return up to - 5000 items. Note that if the listing operation crosses a partition boundary, then the service - will return a continuation token for retrieving the remainder of the results. For this reason, - it is possible that the service will return fewer results than specified by maxresults, or than - the default of 5000. 
- :type maxresults: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: FilterBlobSegment, or the result of cls(response) - :rtype: ~azure.storage.blob.models.FilterBlobSegment - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "blobs" - accept = "application/xml" - - # Construct URL - url = self.filter_blobs.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if where is not None: - query_parameters['where'] = self._serialize.query("where", where, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('FilterBlobSegment', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_lease.py b/azure/multiapi/storagev2/blob/v2021_04_10/_lease.py deleted file mode 100644 index d495d6e..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_lease.py +++ /dev/null @@ -1,331 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._serialize import get_modify_conditions - -if TYPE_CHECKING: - from datetime import datetime - - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(object): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.BlobClient or - ~azure.storage.blob.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'blob_name'): - self._client = client._client.blob # type: ignore # pylint: disable=protected-access - elif hasattr(client, 'container_name'): - self._client = client._client.container # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use either BlobClient or ContainerClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. - - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
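The lease methods documented above map onto a short client-side workflow. A hedged sketch of acquiring, renewing and releasing a blob lease with this class (container and blob names are illustrative, import path assumed as before)::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient, BlobLeaseClient

    blob = BlobClient.from_connection_string("<connection-string>", "mycontainer", "report.csv")

    lease = BlobLeaseClient(blob)        # a new UUID lease id is generated
    lease.acquire(lease_duration=15)     # fixed 15-second lease; -1 means infinite
    lease.renew()                        # resets the duration clock
    lease.release()                      # lets another client acquire immediately

    # Used as a context manager, the lease is released on exit (see __exit__ above).
    with blob.acquire_lease(lease_duration=-1) as lease:
        blob.set_blob_metadata({"owner": "etl"}, lease=lease)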
- :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. 
``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2021_04_10/_list_blobs_helper.py deleted file mode 100644 index faf9433..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_list_blobs_helper.py +++ /dev/null @@ -1,244 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -try: - from urllib.parse import unquote -except ImportError: - from urllib import unquote -from azure.core.paging import PageIterator, ItemPaged -from azure.core.exceptions import HttpResponseError -from ._deserialize import get_blob_properties_from_generated_code, parse_tags -from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix, FilterBlobItem -from ._models import BlobProperties, FilteredBlob -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - def _extract_data_cb(self, get_next_return): - continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - if item.name.encoded: - name = unquote(item.name.content) - else: - name = item.name.content - return BlobPrefix( - self._command, - container=self.container, - prefix=name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class BlobPrefix(ItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str next_marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class FilteredBlobPaged(PageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.FilteredBlob) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
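``BlobPrefix`` items are what ``walk_blobs`` yields for each virtual directory when a delimiter is supplied. A sketch of a recursive, one-level-at-a-time hierarchical listing (delimiter and names are illustrative, import path assumed)::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobPrefix

    def walk(container_client, prefix=""):
        # With a delimiter, names sharing a prefix are folded into BlobPrefix
        # items ("virtual directories") instead of a flat list of blobs.
        for item in container_client.walk_blobs(name_starts_with=prefix, delimiter="/"):
            if isinstance(item, BlobPrefix):
                print("dir: ", item.name)
                walk(container_client, item.name)   # descend one level
            else:
                print("blob:", item.name)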
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - tags = parse_tags(item.tags) - blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_models.py b/azure/multiapi/storagev2/blob/v2021_04_10/_models.py deleted file mode 100644 index c67806d..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_models.py +++ /dev/null @@ -1,1259 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._generated.models import ArrowField - -from ._shared import decode_base64_to_bytes -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Logging as GeneratedLogging -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import StaticWebsite as GeneratedStaticWebsite -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy - - -class BlobType(str, Enum): - - BlockBlob = "BlockBlob" - PageBlob = "PageBlob" - AppendBlob = "AppendBlob" - - -class BlockState(str, Enum): - """Block blob block types.""" - - Committed = 'Committed' #: Committed blocks. - Latest = 'Latest' #: Latest blocks. - Uncommitted = 'Uncommitted' #: Uncommitted blocks. - - -class StandardBlobTier(str, Enum): - """ - Specifies the blob tier to set the blob to. This is only applicable for - block blobs on standard storage accounts. - """ - - Archive = 'Archive' #: Archive - Cool = 'Cool' #: Cool - Hot = 'Hot' #: Hot - - -class PremiumPageBlobTier(str, Enum): - """ - Specifies the page blob tier to set the blob to. 
This is only applicable to page - blobs on premium storage accounts. Please take a look at: - https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets - for detailed information on the corresponding IOPS and throughput per PageBlobTier. - """ - - P4 = 'P4' #: P4 Tier - P6 = 'P6' #: P6 Tier - P10 = 'P10' #: P10 Tier - P20 = 'P20' #: P20 Tier - P30 = 'P30' #: P30 Tier - P40 = 'P40' #: P40 Tier - P50 = 'P50' #: P50 Tier - P60 = 'P60' #: P60 Tier - - -class QuickQueryDialect(str, Enum): - """Specifies the quick query input/output dialect.""" - - DelimitedText = 'DelimitedTextDialect' - DelimitedJson = 'DelimitedJsonDialect' - Parquet = 'ParquetDialect' - - -class SequenceNumberAction(str, Enum): - """Sequence number actions.""" - - Increment = 'increment' - """ - Increments the value of the sequence number by 1. If specifying this option, - do not include the x-ms-blob-sequence-number header. - """ - - Max = 'max' - """ - Sets the sequence number to be the higher of the value included with the - request and the value currently stored for the blob. - """ - - Update = 'update' - """Sets the sequence number to the value included with the request.""" - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the container may be accessed publicly and the level of access. - """ - - OFF = 'off' - """ - Specifies that there is no public read access for both the container and blobs within the container. - Clients cannot enumerate the containers within the storage account as well as the blobs within the container. - """ - - Blob = 'blob' - """ - Specifies public read access for blobs. Blob data within this container can be read - via anonymous request, but container data is not available. Clients cannot enumerate - blobs within the container via anonymous request. - """ - - Container = 'container' - """ - Specifies full public read access for container and blob data. Clients can enumerate - blobs within the container via anonymous request, but cannot enumerate containers - within the storage account. - """ - - -class BlobImmutabilityPolicyMode(str, Enum): - """ - Specifies the immutability policy mode to set on the blob. - "Mutable" can only be returned by service, don't set to "Mutable". - """ - - Unlocked = "Unlocked" - Locked = "Locked" - Mutable = "Mutable" - - -class BlobAnalyticsLogging(GeneratedLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. 
- """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for blobs. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Blob service. - The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.blob.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GeneratedStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. 
- """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ContainerProperties(DictMixin): - """Blob container's properties class. - - Returned ``ContainerProperties`` instances expose these values through a - dictionary interface, for example: ``container_props["last_modified"]``. - Additionally, the container name is available as ``container_props["name"]``. - - :ivar str name: - Name of the container. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the container was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. 
- :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the container. - :ivar str public_access: Specifies whether data in the container may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the container has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the container has a legal hold. - :ivar bool immutable_storage_with_versioning_enabled: - Represents whether immutable storage with versioning enabled on the container. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :ivar dict metadata: A dict with name-value pairs to associate with the - container as metadata. - :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope: - The default encryption scope configuration for the container. - :ivar bool deleted: - Whether this container was deleted. - :ivar str version: - The version of a deleted container. - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.lease = LeaseProperties(**kwargs) - self.public_access = kwargs.get('x-ms-blob-public-access') - self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') - self.deleted = None - self.version = None - self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') - self.metadata = kwargs.get('metadata') - self.encryption_scope = None - self.immutable_storage_with_versioning_enabled = kwargs.get('x-ms-immutable-storage-with-versioning-enabled') - default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') - if default_encryption_scope: - self.encryption_scope = ContainerEncryptionScope( - default_encryption_scope=default_encryption_scope, - prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) - ) - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = generated.properties.public_access - props.has_immutability_policy = generated.properties.has_immutability_policy - props.immutable_storage_with_versioning_enabled = \ - generated.properties.is_immutable_storage_with_versioning_enabled - props.deleted = generated.deleted - props.version = generated.version - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access - return props - - -class ContainerPropertiesPaged(PageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. 
- :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class ImmutabilityPolicy(DictMixin): - """Optional parameters for setting the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime expiry_time: - Specifies the date time when the blobs immutability policy is set to expire. - :keyword str or ~azure.storage.blob.BlobImmutabilityPolicyMode policy_mode: - Specifies the immutability policy mode to set on the blob. - Possible values to set include: "Locked", "Unlocked". - "Mutable" can only be returned by service, don't set to "Mutable". - """ - - def __init__(self, **kwargs): - self.expiry_time = kwargs.pop('expiry_time', None) - self.policy_mode = kwargs.pop('policy_mode', None) - - @classmethod - def _from_generated(cls, generated): - immutability_policy = cls() - immutability_policy.expiry_time = generated.properties.immutability_policy_expires_on - immutability_policy.policy_mode = generated.properties.immutability_policy_mode - return immutability_policy - - -class BlobProperties(DictMixin): - """ - Blob Properties. - - :ivar str name: - The name of the blob. - :ivar str container: - The container in which the blob resides. - :ivar str snapshot: - Datetime value that uniquely identifies the blob snapshot. - :ivar ~azure.blob.storage.BlobType blob_type: - String indicating this blob's type. - :ivar dict metadata: - Name-value pairs associated with the blob as metadata. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the blob was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - The size of the content returned. If the entire blob was requested, - the length of blob in bytes. If a subset of the blob was requested, the - length of the returned subset. 
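``ImmutabilityPolicy`` feeds the blob-level immutability call (API version 2020-10-02 or later; the target container generally needs version-level immutability enabled). A minimal sketch with placeholder names and the same assumed import path::

    from datetime import datetime, timedelta, timezone

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient, ImmutabilityPolicy

    blob = BlobClient.from_connection_string("<connection-string>", "audit", "2024-01.log")

    # "Unlocked" policies can still be shortened or removed; "Locked" cannot.
    # "Mutable" is only ever returned by the service and must not be set here.
    policy = ImmutabilityPolicy(
        expiry_time=datetime.now(timezone.utc) + timedelta(days=30),
        policy_mode="Unlocked",
    )
    blob.set_immutability_policy(policy)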
- :ivar str content_range: - Indicates the range of bytes returned in the event that the client - requested a subset of the blob. - :ivar int append_blob_committed_block_count: - (For Append Blobs) Number of committed blocks in the blob. - :ivar bool is_append_blob_sealed: - Indicate if the append blob is sealed or not. - - .. versionadded:: 12.4.0 - - :ivar int page_blob_sequence_number: - (For Page Blobs) Sequence number for page blob used for coordinating - concurrent writes. - :ivar bool server_encrypted: - Set to true if the blob is encrypted on the server. - :ivar ~azure.storage.blob.CopyProperties copy: - Stores all the copy properties for the blob. - :ivar ~azure.storage.blob.ContentSettings content_settings: - Stores all the content settings for the blob. - :ivar ~azure.storage.blob.LeaseProperties lease: - Stores all the lease information for the blob. - :ivar ~azure.storage.blob.StandardBlobTier blob_tier: - Indicates the access tier of the blob. The hot tier is optimized - for storing data that is accessed frequently. The cool storage tier - is optimized for storing data that is infrequently accessed and stored - for at least a month. The archive tier is optimized for storing - data that is rarely accessed and stored for at least six months - with flexible latency requirements. - :ivar str rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :ivar ~datetime.datetime blob_tier_change_time: - Indicates when the access tier was last changed. - :ivar bool blob_tier_inferred: - Indicates whether the access tier was inferred by the service. - If false, it indicates that the tier was set explicitly. - :ivar bool deleted: - Whether this blob was deleted. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the blob was deleted. - :ivar int remaining_retention_days: - The number of days that the blob will be retained before being permanently deleted by the service. - :ivar ~datetime.datetime creation_time: - Indicates when the blob was created, in UTC. - :ivar str archive_status: - Archive status of blob. - :ivar str encryption_key_sha256: - The SHA-256 hash of the provided encryption key. - :ivar str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :ivar bool request_server_encrypted: - Whether this blob is encrypted. - :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: - Only present for blobs that have policy ids and rule ids applied to them. - - .. versionadded:: 12.4.0 - - :ivar str object_replication_destination_policy: - Represents the Object Replication Policy Id that created this blob. - - .. versionadded:: 12.4.0 - - :ivar ~datetime.datetime last_accessed_on: - Indicates when the last Read/Write operation was performed on a Blob. - - .. versionadded:: 12.6.0 - - :ivar int tag_count: - Tags count on this blob. - - .. versionadded:: 12.4.0 - - :ivar dict(str, str) tags: - Key value pair of tags on this blob. - - .. versionadded:: 12.4.0 - :ivar bool has_versions_only: - A true value indicates the root blob is deleted - - .. 
versionadded:: 12.10.0 - - :ivar ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :ivar bool has_legal_hold: - Specified if a legal hold should be set on the blob. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.container = None - self.snapshot = kwargs.get('x-ms-snapshot') - self.version_id = kwargs.get('x-ms-version-id') - self.is_current_version = kwargs.get('x-ms-is-current-version') - self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None - self.metadata = kwargs.get('metadata') - self.encrypted_metadata = kwargs.get('encrypted_metadata') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') - self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') - self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.blob_tier = kwargs.get('x-ms-access-tier') - self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') - self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') - self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') - self.deleted = False - self.deleted_time = None - self.remaining_retention_days = None - self.creation_time = kwargs.get('x-ms-creation-time') - self.archive_status = kwargs.get('x-ms-archive-status') - self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') - self.encryption_scope = kwargs.get('x-ms-encryption-scope') - self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') - self.object_replication_source_properties = kwargs.get('object_replication_source_properties') - self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') - self.last_accessed_on = kwargs.get('x-ms-last-access-time') - self.tag_count = kwargs.get('x-ms-tag-count') - self.tags = None - self.immutability_policy = ImmutabilityPolicy(expiry_time=kwargs.get('x-ms-immutability-policy-until-date'), - policy_mode=kwargs.get('x-ms-immutability-policy-mode')) - self.has_legal_hold = kwargs.get('x-ms-legal-hold') - self.has_versions_only = None - - -class FilteredBlob(DictMixin): - """Blob info from a Filter Blobs API call. - - :ivar name: Blob name - :type name: str - :ivar container_name: Container name. - :type container_name: str - :ivar tags: Key value pairs of blob tags. - :type tags: Dict[str, str] - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name', None) - self.container_name = kwargs.get('container_name', None) - self.tags = kwargs.get('tags', None) - - -class LeaseProperties(DictMixin): - """Blob Lease Properties. - - :ivar str status: - The lease status of the blob. Possible values: locked|unlocked - :ivar str state: - Lease state of the blob. 
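The ``BlobProperties`` model shown above is what ``get_blob_properties`` and the listing helpers return; as a ``DictMixin`` its fields are reachable both as attributes and as keys. A small sketch with illustrative names::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient

    blob = BlobClient.from_connection_string("<connection-string>", "logs", "2024/01/app.log")
    props = blob.get_blob_properties()

    print(props.name, props.blob_type, props.size)      # core identity and length
    print(props.content_settings.content_type)          # nested ContentSettings
    print(props.lease.state, props.lease.status)        # nested LeaseProperties
    print(props["last_modified"])                       # DictMixin: key access also works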
Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a blob is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """The content settings of a blob. - - :param str content_type: - The content type specified for the blob. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the blob, that value is stored. - :param str content_language: - If the content_language has previously been set - for the blob, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the blob, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the blob, that value is stored. - :param bytearray content_md5: - If the content_md5 has been set for the blob, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class CopyProperties(DictMixin): - """Blob Copy Properties. - - These properties will be `None` if this blob has never been the destination - in a Copy Blob operation, or if this blob has been modified after a concluded - Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. - - :ivar str id: - String identifier for the last attempted Copy Blob operation where this blob - was the destination blob. - :ivar str source: - URL up to 2 KB in length that specifies the source blob used in the last attempted - Copy Blob operation where this blob was the destination blob. 
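``ContentSettings`` travels with upload and with the standalone header-setting call. A sketch, with the content values chosen purely for illustration::

    from azure.multiapi.storagev2.blob.v2021_08_06 import BlobClient, ContentSettings

    blob = BlobClient.from_connection_string("<connection-string>", "web", "site.css")

    settings = ContentSettings(content_type="text/css", cache_control="max-age=86400")

    # Either attach the settings at upload time...
    blob.upload_blob(b"body { margin: 0 }", overwrite=True, content_settings=settings)
    # ...or rewrite the HTTP headers on an existing blob.
    blob.set_http_headers(content_settings=settings)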
- :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy Blob. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy Blob operation where this blob was the destination blob. Can show - between 0 and Content-Length bytes copied. - :ivar ~datetime.datetime completion_time: - Conclusion time of the last attempted Copy Blob operation where this blob was the - destination blob. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar ~datetime.datetime destination_snapshot: - Included if the blob is incremental copy blob or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this blob. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class BlobBlock(DictMixin): - """BlockBlob Block class. - - :param str block_id: - Block id. - :param str state: - Block state. Possible values: committed|uncommitted - :ivar int size: - Block size in bytes. - """ - - def __init__(self, block_id, state=BlockState.Latest): - self.id = block_id - self.state = state - self.size = None - - @classmethod - def _from_generated(cls, generated): - try: - decoded_bytes = decode_base64_to_bytes(generated.name) - block_id = decoded_bytes.decode('utf-8') - # this is to fix a bug. When large blocks are uploaded through upload_blob the block id isn't base64 encoded - # while service expected block id is base64 encoded, so when we get block_id if we cannot base64 decode, it - # means we didn't base64 encode it when stage the block, we want to use the returned block_id directly. 
- except UnicodeDecodeError: - block_id = generated.name - block = cls(block_id) - block.size = generated.size - return block - - -class PageRange(DictMixin): - """Page Range for page blob. - - :param int start: - Start of page range in bytes. - :param int end: - End of page range in bytes. - """ - - def __init__(self, start=None, end=None): - self.start = start - self.end = end - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class ContainerSasPermissions(object): - """ContainerSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_container_sas` function and - for the AccessPolicies used with - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - - :param bool read: - Read the content, properties, metadata or block list of any blob in the - container. Use any blob in the container as the source of a copy operation. - :param bool write: - For any blob in the container, create or write content, properties, - metadata, or block list. 
Snapshot or lease the blob. Resize the blob - (page blob only). Use the blob as the destination of a copy operation - within the same account. Note: You cannot grant permissions to read or - write container properties or metadata, nor to lease a container, with - a container SAS. Use an account SAS instead. - :param bool delete: - Delete any blob in the container. Note: You cannot grant permissions to - delete a container with a container SAS. Use an account SAS instead. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - List blobs in the container. - :param bool tag: - Set or get tags on the blobs in the container. - :keyword bool add: - Add a block to an append blob. - :keyword bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :keyword bool permanent_delete: - To enable permanent delete on the blob is permitted. - :keyword bool filter_by_tags: - To enable finding blobs by tags. - :keyword bool move: - Move a blob or a directory and its contents to a new location. - :keyword bool execute: - Get the system properties and, if the hierarchical namespace is enabled for the storage account, - get the POSIX ACL of a blob. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. - """ - def __init__(self, read=False, write=False, delete=False, - list=False, delete_previous_version=False, tag=False, **kwargs): # pylint: disable=redefined-builtin - self.read = read - self.add = kwargs.pop('add', False) - self.create = kwargs.pop('create', False) - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.permanent_delete = kwargs.pop('permanent_delete', False) - self.list = list - self.tag = tag - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self.move = kwargs.pop('move', False) - self.execute = kwargs.pop('execute', False) - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('y' if self.permanent_delete else '') + - ('l' if self.list else '') + - ('t' if self.tag else '') + - ('f' if self.filter_by_tags else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('i' if self.set_immutability_policy else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ContainerSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, delete, - and list permissions. 
- :return: A ContainerSasPermissions object - :rtype: ~azure.storage.blob.ContainerSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_permanent_delete = 'y' in permission - p_list = 'l' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_set_immutability_policy = 'i' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, - delete_previous_version=p_delete_previous_version, tag=p_tag, add=p_add, - create=p_create, permanent_delete=p_permanent_delete, filter_by_tags=p_filter_by_tags, - move=p_move, execute=p_execute, set_immutability_policy=p_set_immutability_policy) - - return parsed - - -class BlobSasPermissions(object): - """BlobSasPermissions class to be used with the - :func:`~azure.storage.blob.generate_blob_sas` function. - - :param bool read: - Read the content, properties, metadata and block list. Use the blob as - the source of a copy operation. - :param bool add: - Add a block to an append blob. - :param bool create: - Write a new blob, snapshot a blob, or copy a blob to a new blob. - :param bool write: - Create or write content, properties, metadata, or block list. Snapshot - or lease the blob. Resize the blob (page blob only). Use the blob as the - destination of a copy operation within the same account. - :param bool delete: - Delete the blob. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool tag: - Set or get tags on the blob. - :keyword bool permanent_delete: - To enable permanent delete on the blob is permitted. - :keyword bool move: - Move a blob or a directory and its contents to a new location. - :keyword bool execute: - Get the system properties and, if the hierarchical namespace is enabled for the storage account, - get the POSIX ACL of a blob. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. - """ - def __init__(self, read=False, add=False, create=False, write=False, - delete=False, delete_previous_version=False, tag=False, **kwargs): - self.read = read - self.add = add - self.create = create - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.permanent_delete = kwargs.pop('permanent_delete', False) - self.tag = tag - self.move = kwargs.pop('move', False) - self.execute = kwargs.pop('execute', False) - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('y' if self.permanent_delete else '') + - ('t' if self.tag else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('i' if self.set_immutability_policy else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a BlobSasPermissions from a string. - - To specify read, add, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". 
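As a minimal sketch of the single-letter convention described above (assuming the class is exposed as in the upstream ``azure-storage-blob`` package)::

    from azure.storage.blob import BlobSasPermissions

    perms = BlobSasPermissions.from_string("rw")
    assert perms.read and perms.write and not perms.delete

    # str() rebuilds the flag string in the canonical order used by __init__.
    assert str(BlobSasPermissions(read=True, write=True, tag=True)) == "rwt"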
- - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. - :return: A BlobSasPermissions object - :rtype: ~azure.storage.blob.BlobSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_permanent_delete = 'y' in permission - p_tag = 't' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_set_immutability_policy = 'i' in permission - - parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, - delete_previous_version=p_delete_previous_version, tag=p_tag, permanent_delete=p_permanent_delete, - move=p_move, execute=p_execute, set_immutability_policy=p_set_immutability_policy) - - return parsed - - -class CustomerProvidedEncryptionKey(object): - """ - All data in Azure Storage is encrypted at-rest using an account-level encryption key. - In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents - and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. - - When you use a customer-provided key, Azure Storage does not manage or persist your key. - When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. - A SHA-256 hash of the encryption key is written alongside the blob contents, - and is used to verify that all subsequent operations against the blob use the same encryption key. - This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. - When reading a blob, the provided key is used to decrypt your data after reading it from disk. - In both cases, the provided encryption key is securely discarded - as soon as the encryption or decryption process completes. - - :param str key_value: - Base64-encoded AES-256 encryption key value. - :param str key_hash: - Base64-encoded SHA256 of the encryption key. - :ivar str algorithm: - Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - """ - def __init__(self, key_value, key_hash): - self.key_value = key_value - self.key_hash = key_hash - self.algorithm = 'AES256' - - -class ContainerEncryptionScope(object): - """The default encryption scope configuration for a container. - - This scope is used implicitly for all future writes within the container, - but can be overridden per blob operation. - - .. versionadded:: 12.2.0 - - :param str default_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - :param bool prevent_encryption_scope_override: - If true, prevents any request from specifying a different encryption scope than the scope - set on the container. Default value is false. - """ - - def __init__(self, default_encryption_scope, **kwargs): - self.default_encryption_scope = default_encryption_scope - self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) - - @classmethod - def _from_generated(cls, generated): - if generated.properties.default_encryption_scope: - scope = cls( - generated.properties.default_encryption_scope, - prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False - ) - return scope - return None - - -class DelimitedJsonDialect(DictMixin): - """Defines the input or output JSON serialization for a blob data query. 
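The ``CustomerProvidedEncryptionKey`` description above pairs a base64 AES-256 key with the base64 SHA-256 of the same raw key bytes; a minimal sketch, assuming the class is importable as in the upstream ``azure-storage-blob`` package::

    import base64
    import hashlib
    import os

    from azure.storage.blob import CustomerProvidedEncryptionKey

    raw_key = os.urandom(32)  # 256-bit key; the service never persists it
    cpk = CustomerProvidedEncryptionKey(
        key_value=base64.b64encode(raw_key).decode('utf-8'),
        key_hash=base64.b64encode(hashlib.sha256(raw_key).digest()).decode('utf-8'),
    )
    assert cpk.algorithm == 'AES256'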
- - :keyword str delimiter: The line separator character, default value is '\n' - """ - - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', '\n') - - -class DelimitedTextDialect(DictMixin): - """Defines the input or output delimited (CSV) serialization for a blob query request. - - :keyword str delimiter: - Column separator, defaults to ','. - :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\\\\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - def __init__(self, **kwargs): - self.delimiter = kwargs.pop('delimiter', ',') - self.quotechar = kwargs.pop('quotechar', '"') - self.lineterminator = kwargs.pop('lineterminator', '\n') - self.escapechar = kwargs.pop('escapechar', "") - self.has_header = kwargs.pop('has_header', False) - - -class ArrowDialect(ArrowField): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param ~azure.storage.blob.ArrowType type: Arrow field type. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. - """ - def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin - super(ArrowDialect, self).__init__(type=type, **kwargs) - - -class ArrowType(str, Enum): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class ObjectReplicationPolicy(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str policy_id: - Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. - :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: - Within each policy there may be multiple replication rules. - e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 - """ - - def __init__(self, **kwargs): - self.policy_id = kwargs.pop('policy_id', None) - self.rules = kwargs.pop('rules', None) - - -class ObjectReplicationRule(DictMixin): - """Policy id and rule ids applied to a blob. - - :ivar str rule_id: - Rule id. - :ivar str status: - The status of the rule. It could be "Complete" or "Failed" - """ - - def __init__(self, **kwargs): - self.rule_id = kwargs.pop('rule_id', None) - self.status = kwargs.pop('status', None) - - -class BlobQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. 
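A hedged sketch of how the dialects above are typically combined in a quick-query call; ``BlobClient.query_blob`` and its keyword names follow the upstream ``azure-storage-blob`` client and are an assumption here, and the connection string is a placeholder::

    from azure.storage.blob import BlobClient, DelimitedJsonDialect, DelimitedTextDialect

    blob = BlobClient.from_connection_string(
        "<connection-string>", container_name="input", blob_name="data.csv")
    reader = blob.query_blob(
        "SELECT * from BlobStorage",
        blob_format=DelimitedTextDialect(delimiter=',', quotechar='"', has_header=True),
        output_format=DelimitedJsonDialect(delimiter='\n'),
        on_error=lambda err: print(err.description),  # receives BlobQueryError instances
    )
    print(reader.readall())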
- """ - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2021_04_10/_quick_query_helper.py deleted file mode 100644 index 3164337..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_quick_query_helper.py +++ /dev/null @@ -1,195 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from io import BytesIO -from typing import Union, Iterable, IO # pylint: disable=unused-import - -from ._shared.avro.datafile import DataFileReader -from ._shared.avro.avro_io import DatumReader - - -class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. - """ - - def __init__( - self, - name=None, - container=None, - errors=None, - record_delimiter='\n', - encoding=None, - headers=None, - response=None, - error_cls=None, - ): - self.name = name - self.container = container - self.response_headers = headers - self.record_delimiter = record_delimiter - self._size = 0 - self._bytes_processed = 0 - self._errors = errors - self._encoding = encoding - self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) - self._first_result = self._process_record(next(self._parsed_results)) - self._error_cls = error_cls - - def __len__(self): - return self._size - - def _process_record(self, result): - self._size = result.get('totalBytes', self._size) - self._bytes_processed = result.get('bytesScanned', self._bytes_processed) - if 'data' in result: - return result.get('data') - if 'fatal' in result: - error = self._error_cls( - error=result['name'], - is_fatal=result['fatal'], - description=result['description'], - position=result['position'] - ) - if self._errors: - self._errors(error) - return None - - def _iter_stream(self): - if self._first_result is not None: - yield self._first_result - for next_result in self._parsed_results: - processed_result = self._process_record(next_result) - if processed_result is not None: - yield processed_result - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. 
- :returns: None - """ - for record in self._iter_stream(): - stream.write(record) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - delimiter = self.record_delimiter.encode('utf-8') - for record_chunk in self._iter_stream(): - for record in record_chunk.split(delimiter): - if self._encoding: - yield record.decode(self._encoding) - else: - yield record - - -class QuickQueryStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator): - self.generator = generator - self.iterator = iter(generator) - self._buf = b"" - self._point = 0 - self._download_offset = 0 - self._buf_start = 0 - self.file_length = None - - def __len__(self): - return self.file_length - - def __iter__(self): - return self.iterator - - @staticmethod - def seekable(): - return True - - def __next__(self): - next_part = next(self.iterator) - self._download_offset += len(next_part) - return next_part - - next = __next__ # Python 2 compatibility. - - def tell(self): - return self._point - - def seek(self, offset, whence=0): - if whence == 0: - self._point = offset - elif whence == 1: - self._point += offset - else: - raise ValueError("whence must be 0, or 1") - if self._point < 0: - self._point = 0 # XXX is this right? - - def read(self, size): - try: - # keep reading from the generator until the buffer of this stream has enough data to read - while self._point + size > self._download_offset: - self._buf += self.__next__() - except StopIteration: - self.file_length = self._download_offset - - start_point = self._point - - # EOF - self._point = min(self._point + size, self._download_offset) - - relative_start = start_point - self._buf_start - if relative_start < 0: - raise ValueError("Buffer has dumped too much data") - relative_end = relative_start + size - data = self._buf[relative_start: relative_end] - - # dump the extra data in buffer - # buffer start--------------------16bytes----current read position - dumped_size = max(relative_end - 16 - relative_start, 0) - self._buf_start += dumped_size - self._buf = self._buf[dumped_size:] - - return data diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_serialize.py b/azure/multiapi/storagev2/blob/v2021_04_10/_serialize.py deleted file mode 100644 index b6399c0..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_serialize.py +++ /dev/null @@ -1,215 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Any, Dict, Optional, Tuple, Union, - TYPE_CHECKING) - -try: - from urllib.parse import quote -except ImportError: - from urllib2 import quote # type: ignore - -from azure.core import MatchConditions - -from ._models import ( - ContainerEncryptionScope, - DelimitedJsonDialect) -from ._generated.models import ( - ModifiedAccessConditions, - SourceModifiedAccessConditions, - CpkScopeInfo, - ContainerCpkScopeInfo, - QueryFormat, - QuerySerialization, - DelimitedTextConfiguration, - JsonTextConfiguration, - ArrowConfiguration, - QueryFormatType, - BlobTag, - BlobTags, LeaseAccessConditions -) - -if TYPE_CHECKING: - from ._lease import BlobLeaseClient - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08', - '2020-06-12', - '2020-08-04', - '2020-10-02', - '2020-12-06', - '2021-02-12', - '2021-04-10' -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (Dict[str, Any], str, str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if kwargs.get(etag_param): - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_modify_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None), - if_tags=kwargs.pop('if_tags_match_condition', None) - ) - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), - source_if_tags=kwargs.pop('source_if_tags_match_condition', None) - ) - - -def 
get_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> CpkScopeInfo - if 'encryption_scope' in kwargs: - return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) - return None - - -def get_container_cpk_scope_info(kwargs): - # type: (Dict[str, Any]) -> ContainerCpkScopeInfo - encryption_scope = kwargs.pop('container_encryption_scope', None) - if encryption_scope: - if isinstance(encryption_scope, ContainerEncryptionScope): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope.default_encryption_scope, - prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override - ) - if isinstance(encryption_scope, dict): - return ContainerCpkScopeInfo( - default_encryption_scope=encryption_scope['default_encryption_scope'], - prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') - ) - raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") - return None - - -def get_api_version(kwargs): - # type: (Dict[str, Any]) -> str - api_version = kwargs.get('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or _SUPPORTED_API_VERSIONS[-1] - - -def serialize_blob_tags_header(tags=None): - # type: (Optional[Dict[str, str]]) -> str - if tags is None: - return None - - components = list() - if tags: - for key, value in tags.items(): - components.append(quote(key, safe='.-')) - components.append('=') - components.append(quote(value, safe='.-')) - components.append('&') - - if components: - del components[-1] - - return ''.join(components) - - -def serialize_blob_tags(tags=None): - # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] - tag_list = list() - if tags: - tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] - return BlobTags(blob_tag_set=tag_list) - - -def serialize_query_format(formater): - if formater == "ParquetDialect": - qq_format = QueryFormat( - type=QueryFormatType.PARQUET, - parquet_text_configuration=' ' - ) - elif isinstance(formater, DelimitedJsonDialect): - serialization_settings = JsonTextConfiguration( - record_separator=formater.delimiter - ) - qq_format = QueryFormat( - type=QueryFormatType.json, - json_text_configuration=serialization_settings) - elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well - try: - headers = formater.has_header - except AttributeError: - headers = False - serialization_settings = DelimitedTextConfiguration( - column_separator=formater.delimiter, - field_quote=formater.quotechar, - record_separator=formater.lineterminator, - escape_char=formater.escapechar, - headers_present=headers - ) - qq_format = QueryFormat( - type=QueryFormatType.delimited, - delimited_text_configuration=serialization_settings - ) - elif isinstance(formater, list): - serialization_settings = ArrowConfiguration( - schema=formater - ) - qq_format = QueryFormat( - type=QueryFormatType.arrow, - arrow_configuration=serialization_settings) - elif not formater: - return None - else: - raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect or ParquetDialect.") - return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- 
a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/authentication.py deleted file mode 100644 index adf64c7..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/authentication.py +++ /dev/null @@ -1,178 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import re -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. 
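The ``sign_string`` helper above is the Shared Key signing primitive: decode the base64 account key, HMAC-SHA256 the UTF-8 string-to-sign, and base64 the digest. A standalone sketch with made-up key material::

    import base64
    import hashlib
    import hmac

    account_key = base64.b64encode(b'0' * 64).decode('utf-8')   # placeholder, not a real key
    string_to_sign = 'GET\n...\n/myaccount/mycontainer\ncomp:list'  # abbreviated for illustration

    decoded_key = base64.b64decode(account_key)
    digest = hmac.new(decoded_key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
    signature = base64.b64encode(digest).decode('utf-8')        # goes into the Authorization header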
For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. - """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) - - -class StorageHttpChallenge(object): - def __init__(self, challenge): - """ Parses an HTTP WWW-Authentication Bearer challenge from the Storage service. 
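The ``on_request`` composition above concatenates its pieces in a fixed order; roughly, with made-up values (a sketch of the Shared Key string-to-sign, not an exhaustive specification)::

    string_to_sign = (
        'GET\n'                 # HTTP verb
        + '\n' * 11             # the 11 standard headers listed above, all empty here
        + 'x-ms-date:Mon, 06 May 2024 10:00:00 GMT\n'
        + 'x-ms-version:2021-04-10\n'                 # canonicalized x-ms-* headers
        + '/myaccount/mycontainer'                    # canonicalized resource
        + '\ncomp:list\nrestype:container'            # canonicalized query parameters
    )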
""" - if not challenge: - raise ValueError("Challenge cannot be empty") - - self._parameters = {} - self.scheme, trimmed_challenge = challenge.strip().split(" ", 1) - - # name=value pairs either comma or space separated with values possibly being - # enclosed in quotes - for item in re.split('[, ]', trimmed_challenge): - comps = item.split("=") - if len(comps) == 2: - key = comps[0].strip(' "') - value = comps[1].strip(' "') - if key: - self._parameters[key] = value - - # Extract and verify required parameters - self.authorization_uri = self._parameters.get('authorization_uri') - if not self.authorization_uri: - raise ValueError("Authorization Uri not found") - - self.resource_id = self._parameters.get('resource_id') - if not self.resource_id: - raise ValueError("Resource id not found") - - uri_path = urlparse(self.authorization_uri).path.lstrip("/") - self.tenant_id = uri_path.split("/")[0] - - def get_value(self, key): - return self._parameters.get(key) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/__init__.py deleted file mode 100644 index 5b396cd..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/avro_io.py deleted file mode 100644 index 93a5c13..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/avro_io.py +++ /dev/null @@ -1,464 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. - - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. 
-""" - -import json -import logging -import struct -import sys - -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -STRUCT_FLOAT = struct.Struct('= 0), n - input_bytes = self.reader.read(n) - if n > 0 and not input_bytes: - raise StopIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return self.read_long() - - def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - b = ord(self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(self.read(4))[0] - - def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(self.read(8))[0] - - def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = self.read_long() - assert (nbytes >= 0), nbytes - return self.read(nbytes) - - def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. - """ - input_bytes = self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - def skip_boolean(self): - self.skip(1) - - def skip_int(self): - self.skip_long() - - def skip_long(self): - b = ord(self.read(1)) - while (b & 0x80) != 0: - b = ord(self.read(1)) - - def skip_float(self): - self.skip(4) - - def skip_double(self): - self.skip(8) - - def skip_bytes(self): - self.skip(self.read_long()) - - def skip_utf8(self): - self.skip_bytes() - - def skip(self, n): - self.reader.seek(self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class DatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema". 
- """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - def read(self, decoder): - return self.read_data(self.writer_schema, decoder) - - def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = decoder.read_boolean() - elif writer_schema.type == 'string': - result = decoder.read_utf8() - elif writer_schema.type == 'int': - result = decoder.read_int() - elif writer_schema.type == 'long': - result = decoder.read_long() - elif writer_schema.type == 'float': - result = decoder.read_float() - elif writer_schema.type == 'double': - result = decoder.read_double() - elif writer_schema.type == 'bytes': - result = decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = decoder.skip_boolean() - elif writer_schema.type == 'string': - result = decoder.skip_utf8() - elif writer_schema.type == 'int': - result = decoder.skip_int() - elif writer_schema.type == 'long': - result = decoder.skip_long() - elif writer_schema.type == 'float': - result = decoder.skip_float() - elif writer_schema.type == 'double': - result = decoder.skip_double() - elif writer_schema.type == 'bytes': - result = decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = self.skip_enum(decoder) - elif writer_schema.type == 'array': - self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. - """ - return decoder.read(writer_schema.size) - - @staticmethod - def skip_fixed(writer_schema, decoder): - return decoder.skip(writer_schema.size) - - @staticmethod - def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. 
- """ - # read data - index_of_symbol = decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - def skip_enum(decoder): - return decoder.skip_int() - - def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - read_items.append(self.read_data(writer_schema.items, decoder)) - block_count = decoder.read_long() - return read_items - - def skip_array(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - self.skip_data(writer_schema.items, decoder) - block_count = decoder.read_long() - - def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = {} - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - decoder.read_long() - for _ in range(block_count): - key = decoder.read_utf8() - read_items[key] = self.read_data(writer_schema.values, decoder) - block_count = decoder.read_long() - return read_items - - def skip_map(self, writer_schema, decoder): - block_count = decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = decoder.read_long() - decoder.skip(block_size) - else: - for _ in range(block_count): - decoder.skip_utf8() - self.skip_data(writer_schema.values, decoder) - block_count = decoder.read_long() - - def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. 
- """ - # schema resolution - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return self.read_data(selected_writer_schema, decoder) - - def skip_union(self, writer_schema, decoder): - index_of_schema = int(decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. - """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/avro_io_async.py deleted file mode 100644 index e981216..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/avro_io_async.py +++ /dev/null @@ -1,448 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Input/output utilities. - -Includes: - - i/o-specific constants - - i/o-specific exceptions - - schema validation - - leaf value encoding and decoding - - datum reader/writer stuff (?) - -Also includes a generic representation for data, which uses the -following mapping: - - Schema records are implemented as dict. - - Schema arrays are implemented as list. - - Schema maps are implemented as dict. - - Schema strings are implemented as unicode. - - Schema bytes are implemented as str. - - Schema ints are implemented as int. - - Schema longs are implemented as long. 
- - Schema floats are implemented as float. - - Schema doubles are implemented as float. - - Schema booleans are implemented as bool. -""" - -import logging -import sys - -from ..avro import schema - -from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Decoder - - -class AsyncBinaryDecoder(object): - """Read leaf values.""" - - def __init__(self, reader): - """ - reader is a Python object on which we can call read, seek, and tell. - """ - self._reader = reader - - @property - def reader(self): - """Reports the reader used by this decoder.""" - return self._reader - - async def read(self, n): - """Read n bytes. - - Args: - n: Number of bytes to read. - Returns: - The next n bytes from the input. - """ - assert (n >= 0), n - input_bytes = await self.reader.read(n) - if n > 0 and not input_bytes: - raise StopAsyncIteration - assert (len(input_bytes) == n), input_bytes - return input_bytes - - @staticmethod - def read_null(): - """ - null is written as zero bytes - """ - return None - - async def read_boolean(self): - """ - a boolean is written as a single byte - whose value is either 0 (false) or 1 (true). - """ - b = ord(await self.read(1)) - if b == 1: - return True - if b == 0: - return False - fail_msg = "Invalid value for boolean: %s" % b - raise schema.AvroException(fail_msg) - - async def read_int(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - return await self.read_long() - - async def read_long(self): - """ - int and long values are written using variable-length, zig-zag coding. - """ - b = ord(await self.read(1)) - n = b & 0x7F - shift = 7 - while (b & 0x80) != 0: - b = ord(await self.read(1)) - n |= (b & 0x7F) << shift - shift += 7 - datum = (n >> 1) ^ -(n & 1) - return datum - - async def read_float(self): - """ - A float is written as 4 bytes. - The float is converted into a 32-bit integer using a method equivalent to - Java's floatToIntBits and then encoded in little-endian format. - """ - return STRUCT_FLOAT.unpack(await self.read(4))[0] - - async def read_double(self): - """ - A double is written as 8 bytes. - The double is converted into a 64-bit integer using a method equivalent to - Java's doubleToLongBits and then encoded in little-endian format. - """ - return STRUCT_DOUBLE.unpack(await self.read(8))[0] - - async def read_bytes(self): - """ - Bytes are encoded as a long followed by that many bytes of data. - """ - nbytes = await self.read_long() - assert (nbytes >= 0), nbytes - return await self.read(nbytes) - - async def read_utf8(self): - """ - A string is encoded as a long followed by - that many bytes of UTF-8 encoded character data. 
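So, per the rule above, a short string costs its zig-zag length prefix plus its UTF-8 bytes; for example::

    encoded = bytes([0x04]) + 'hi'.encode('utf-8')   # length 2 as a zig-zag varint, then b'hi'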
- """ - input_bytes = await self.read_bytes() - if PY3: - try: - return input_bytes.decode('utf-8') - except UnicodeDecodeError as exn: - logger.error('Invalid UTF-8 input bytes: %r', input_bytes) - raise exn - else: - # PY2 - return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable - - def skip_null(self): - pass - - async def skip_boolean(self): - await self.skip(1) - - async def skip_int(self): - await self.skip_long() - - async def skip_long(self): - b = ord(await self.read(1)) - while (b & 0x80) != 0: - b = ord(await self.read(1)) - - async def skip_float(self): - await self.skip(4) - - async def skip_double(self): - await self.skip(8) - - async def skip_bytes(self): - await self.skip(await self.read_long()) - - async def skip_utf8(self): - await self.skip_bytes() - - async def skip(self, n): - await self.reader.seek(await self.reader.tell() + n) - - -# ------------------------------------------------------------------------------ -# DatumReader - - -class AsyncDatumReader(object): - """Deserialize Avro-encoded data into a Python data structure.""" - - def __init__(self, writer_schema=None): - """ - As defined in the Avro specification, we call the schema encoded - in the data the "writer's schema", and the schema expected by the - reader the "reader's schema". - """ - self._writer_schema = writer_schema - - # read/write properties - def set_writer_schema(self, writer_schema): - self._writer_schema = writer_schema - - writer_schema = property(lambda self: self._writer_schema, - set_writer_schema) - - async def read(self, decoder): - return await self.read_data(self.writer_schema, decoder) - - async def read_data(self, writer_schema, decoder): - # function dispatch for reading data based on type of writer's schema - if writer_schema.type == 'null': - result = decoder.read_null() - elif writer_schema.type == 'boolean': - result = await decoder.read_boolean() - elif writer_schema.type == 'string': - result = await decoder.read_utf8() - elif writer_schema.type == 'int': - result = await decoder.read_int() - elif writer_schema.type == 'long': - result = await decoder.read_long() - elif writer_schema.type == 'float': - result = await decoder.read_float() - elif writer_schema.type == 'double': - result = await decoder.read_double() - elif writer_schema.type == 'bytes': - result = await decoder.read_bytes() - elif writer_schema.type == 'fixed': - result = await self.read_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.read_enum(writer_schema, decoder) - elif writer_schema.type == 'array': - result = await self.read_array(writer_schema, decoder) - elif writer_schema.type == 'map': - result = await self.read_map(writer_schema, decoder) - elif writer_schema.type in ['union', 'error_union']: - result = await self.read_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - result = await self.read_record(writer_schema, decoder) - else: - fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - async def skip_data(self, writer_schema, decoder): - if writer_schema.type == 'null': - result = decoder.skip_null() - elif writer_schema.type == 'boolean': - result = await decoder.skip_boolean() - elif writer_schema.type == 'string': - result = await decoder.skip_utf8() - elif writer_schema.type == 'int': - result = await decoder.skip_int() - elif writer_schema.type == 'long': - result = await decoder.skip_long() - elif writer_schema.type == 
'float': - result = await decoder.skip_float() - elif writer_schema.type == 'double': - result = await decoder.skip_double() - elif writer_schema.type == 'bytes': - result = await decoder.skip_bytes() - elif writer_schema.type == 'fixed': - result = await self.skip_fixed(writer_schema, decoder) - elif writer_schema.type == 'enum': - result = await self.skip_enum(decoder) - elif writer_schema.type == 'array': - await self.skip_array(writer_schema, decoder) - result = None - elif writer_schema.type == 'map': - await self.skip_map(writer_schema, decoder) - result = None - elif writer_schema.type in ['union', 'error_union']: - result = await self.skip_union(writer_schema, decoder) - elif writer_schema.type in ['record', 'error', 'request']: - await self.skip_record(writer_schema, decoder) - result = None - else: - fail_msg = "Unknown schema type: %s" % writer_schema.type - raise schema.AvroException(fail_msg) - return result - - @staticmethod - async def read_fixed(writer_schema, decoder): - """ - Fixed instances are encoded using the number of bytes declared - in the schema. - """ - return await decoder.read(writer_schema.size) - - @staticmethod - async def skip_fixed(writer_schema, decoder): - return await decoder.skip(writer_schema.size) - - @staticmethod - async def read_enum(writer_schema, decoder): - """ - An enum is encoded by a int, representing the zero-based position - of the symbol in the schema. - """ - # read data - index_of_symbol = await decoder.read_int() - if index_of_symbol >= len(writer_schema.symbols): - fail_msg = "Can't access enum index %d for enum with %d symbols" \ - % (index_of_symbol, len(writer_schema.symbols)) - raise SchemaResolutionException(fail_msg, writer_schema) - read_symbol = writer_schema.symbols[index_of_symbol] - return read_symbol - - @staticmethod - async def skip_enum(decoder): - return await decoder.skip_int() - - async def read_array(self, writer_schema, decoder): - """ - Arrays are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many array items. - A block with count zero indicates the end of the array. - Each item is encoded per the array's item schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. - The actual count in this case - is the absolute value of the count written. - """ - read_items = [] - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - read_items.append(await self.read_data(writer_schema.items, decoder)) - block_count = await decoder.read_long() - return read_items - - async def skip_array(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await self.skip_data(writer_schema.items, decoder) - block_count = await decoder.read_long() - - async def read_map(self, writer_schema, decoder): - """ - Maps are encoded as a series of blocks. - - Each block consists of a long count value, - followed by that many key/value pairs. - A block with count zero indicates the end of the map. - Each item is encoded per the map's value schema. - - If a block's count is negative, - then the count is followed immediately by a long block size, - indicating the number of bytes in the block. 
- The actual count in this case - is the absolute value of the count written. - """ - read_items = {} - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_count = -block_count - await decoder.read_long() - for _ in range(block_count): - key = await decoder.read_utf8() - read_items[key] = await self.read_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - return read_items - - async def skip_map(self, writer_schema, decoder): - block_count = await decoder.read_long() - while block_count != 0: - if block_count < 0: - block_size = await decoder.read_long() - await decoder.skip(block_size) - else: - for _ in range(block_count): - await decoder.skip_utf8() - await self.skip_data(writer_schema.values, decoder) - block_count = await decoder.read_long() - - async def read_union(self, writer_schema, decoder): - """ - A union is encoded by first writing a long value indicating - the zero-based position within the union of the schema of its value. - The value is then encoded per the indicated schema within the union. - """ - # schema resolution - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - selected_writer_schema = writer_schema.schemas[index_of_schema] - - # read data - return await self.read_data(selected_writer_schema, decoder) - - async def skip_union(self, writer_schema, decoder): - index_of_schema = int(await decoder.read_long()) - if index_of_schema >= len(writer_schema.schemas): - fail_msg = "Can't access branch index %d for union with %d branches" \ - % (index_of_schema, len(writer_schema.schemas)) - raise SchemaResolutionException(fail_msg, writer_schema) - return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) - - async def read_record(self, writer_schema, decoder): - """ - A record is encoded by encoding the values of its fields - in the order that they are declared. In other words, a record - is encoded as just the concatenation of the encodings of its fields. - Field values are encoded per their schema. - - Schema Resolution: - * the ordering of fields may be different: fields are matched by name. - * schemas for fields with the same name in both records are resolved - recursively. - * if the writer's record contains a field with a name not present in the - reader's record, the writer's value for that field is ignored. - * if the reader's record schema has a field that contains a default value, - and writer's schema does not have a field with the same name, then the - reader should use the default value from its field. - * if the reader's record schema has a field with no default value, and - writer's schema does not have a field with the same name, then the - field's value is unset. 
- """ - # schema resolution - read_record = {} - for field in writer_schema.fields: - field_val = await self.read_data(field.type, decoder) - read_record[field.name] = field_val - return read_record - - async def skip_record(self, writer_schema, decoder): - for field in writer_schema.fields: - await self.skip_data(field.type, decoder) - - -# ------------------------------------------------------------------------------ - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/datafile.py deleted file mode 100644 index df06fe0..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/datafile.py +++ /dev/null @@ -1,266 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import io -import logging -import sys -import zlib - -from ..avro import avro_io -from ..avro import schema - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Version of the container file: -VERSION = 1 - -if PY3: - MAGIC = b'Obj' + bytes([VERSION]) - MAGIC_SIZE = len(MAGIC) -else: - MAGIC = 'Obj' + chr(VERSION) - MAGIC_SIZE = len(MAGIC) - -# Size of the synchronization marker, in number of bytes: -SYNC_SIZE = 16 - -# Schema of the container header: -META_SCHEMA = schema.parse(""" -{ - "type": "record", "name": "org.apache.avro.file.Header", - "fields": [{ - "name": "magic", - "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} - }, { - "name": "meta", - "type": {"type": "map", "values": "bytes"} - }, { - "name": "sync", - "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} - }] -} -""" % { - 'magic_size': MAGIC_SIZE, - 'sync_size': SYNC_SIZE, -}) - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null', 'deflate']) - -# Metadata key associated to the schema: -SCHEMA_KEY = "avro.schema" - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class DataFileException(schema.AvroException): - """Problem reading or writing file object containers.""" - -# ------------------------------------------------------------------------------ - - -class DataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io.BinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - - # In case self._reader only has partial content(without header). - # seek(0, 0) to make sure read the (partial)content from beginning. 
- self._reader.seek(0, 0) - - # read the header: magic, meta, sync - self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' % self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - self._cur_object_index = 0 - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. - if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - - def __enter__(self): - return self - - def __exit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __iter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - header_reader.seek(0, 0) - - # read header into a dict - header = self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - def _read_block_header(self): - self._block_count = self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - elif self.codec == 'deflate': - # Compressed data is stored as (length, data), which - # corresponds to how the "bytes" type is encoded. - data = self.raw_decoder.read_bytes() - # -15 is the log of the window size; negative indicates - # "raw" (no zlib headers) decompression. See zlib.h. - uncompressed = zlib.decompress(data, -15) - self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. 
Otherwise, seek back to where we started and return False. - """ - proposed_sync_marker = self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopIteration - if proposed_sync_marker != self.sync_marker: - self.reader.seek(-SYNC_SIZE, 1) - - def __next__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - self._cur_object_index = 0 - - self._read_block_header() - - datum = self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - self.reader.track_object_position() - self.reader.set_object_index(0) - else: - self.reader.set_object_index(self._cur_object_index) - - return datum - - # PY2 - def next(self): - return self.__next__() - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/datafile_async.py deleted file mode 100644 index 1e9d018..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/datafile_async.py +++ /dev/null @@ -1,215 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -"""Read/Write Avro File Object Containers.""" - -import logging -import sys - -from ..avro import avro_io_async -from ..avro import schema -from .datafile import DataFileException -from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY - - -PY3 = sys.version_info[0] == 3 - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Codecs supported by container files: -VALID_CODECS = frozenset(['null']) - - -class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes - """Read files written by DataFileWriter.""" - - def __init__(self, reader, datum_reader, **kwargs): - """Initializes a new data file reader. - - Args: - reader: Open file to read from. - datum_reader: Avro datum reader. - """ - self._reader = reader - self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) - self._header_reader = kwargs.pop('header_reader', None) - self._header_decoder = None if self._header_reader is None else \ - avro_io_async.AsyncBinaryDecoder(self._header_reader) - self._datum_decoder = None # Maybe reset at every block. - self._datum_reader = datum_reader - self.codec = "null" - self._block_count = 0 - self._cur_object_index = 0 - self._meta = None - self._sync_marker = None - - async def init(self): - # In case self._reader only has partial content(without header). 
- # seek(0, 0) to make sure read the (partial)content from beginning. - await self._reader.seek(0, 0) - - # read the header: magic, meta, sync - await self._read_header() - - # ensure codec is valid - avro_codec_raw = self.get_meta('avro.codec') - if avro_codec_raw is None: - self.codec = "null" - else: - self.codec = avro_codec_raw.decode('utf-8') - if self.codec not in VALID_CODECS: - raise DataFileException('Unknown codec: %s.' % self.codec) - - # get ready to read - self._block_count = 0 - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro. - if hasattr(self._reader, 'object_position'): - self.reader.track_object_position() - - # header_reader indicates reader only has partial content. The reader doesn't have block header, - # so we read use the block count stored last time. - # Also ChangeFeed only has codec==null, so use _raw_decoder is good. - if self._header_reader is not None: - self._datum_decoder = self._raw_decoder - self.datum_reader.writer_schema = ( - schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) - return self - - async def __aenter__(self): - return self - - async def __aexit__(self, data_type, value, traceback): - # Perform a close if there's no exception - if data_type is None: - self.close() - - def __aiter__(self): - return self - - # read-only properties - @property - def reader(self): - return self._reader - - @property - def raw_decoder(self): - return self._raw_decoder - - @property - def datum_decoder(self): - return self._datum_decoder - - @property - def datum_reader(self): - return self._datum_reader - - @property - def sync_marker(self): - return self._sync_marker - - @property - def meta(self): - return self._meta - - # read/write properties - @property - def block_count(self): - return self._block_count - - def get_meta(self, key): - """Reports the value of a given metadata key. - - Args: - key: Metadata key (string) to report the value of. - Returns: - Value associated to the metadata key, as bytes. - """ - return self._meta.get(key) - - async def _read_header(self): - header_reader = self._header_reader if self._header_reader else self._reader - header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder - - # seek to the beginning of the file to get magic block - await header_reader.seek(0, 0) - - # read header into a dict - header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) - - # check magic number - if header.get('magic') != MAGIC: - fail_msg = "Not an Avro data file: %s doesn't match %s." \ - % (header.get('magic'), MAGIC) - raise schema.AvroException(fail_msg) - - # set metadata - self._meta = header['meta'] - - # set sync marker - self._sync_marker = header['sync'] - - async def _read_block_header(self): - self._block_count = await self.raw_decoder.read_long() - if self.codec == "null": - # Skip a long; we don't need to use the length. - await self.raw_decoder.skip_long() - self._datum_decoder = self._raw_decoder - else: - raise DataFileException("Unknown codec: %r" % self.codec) - - async def _skip_sync(self): - """ - Read the length of the sync marker; if it matches the sync marker, - return True. Otherwise, seek back to where we started and return False. 
- """ - proposed_sync_marker = await self.reader.read(SYNC_SIZE) - if SYNC_SIZE > 0 and not proposed_sync_marker: - raise StopAsyncIteration - if proposed_sync_marker != self.sync_marker: - await self.reader.seek(-SYNC_SIZE, 1) - - async def __anext__(self): - """Return the next datum in the file.""" - if self.block_count == 0: - await self._skip_sync() - - # object_position is to support reading from current position in the future read, - # no need to downloading from the beginning of avro file with this attr. - if hasattr(self._reader, 'object_position'): - await self.reader.track_object_position() - self._cur_object_index = 0 - - await self._read_block_header() - - datum = await self.datum_reader.read(self.datum_decoder) - self._block_count -= 1 - self._cur_object_index += 1 - - # object_position is to support reading from current position in the future read, - # This will track the index of the next item to be read. - # This will also track the offset before the next sync marker. - if hasattr(self._reader, 'object_position'): - if self.block_count == 0: - # the next event to be read is at index 0 in the new chunk of blocks, - await self.reader.track_object_position() - await self.reader.set_object_index(0) - else: - await self.reader.set_object_index(self._cur_object_index) - - return datum - - def close(self): - """Close this reader.""" - self.reader.close() - - -if __name__ == '__main__': - raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/schema.py deleted file mode 100644 index ffe2853..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/avro/schema.py +++ /dev/null @@ -1,1221 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines - -"""Representation of Avro schemas. - -A schema may be one of: - - A record, mapping field names to field value data; - - An error, equivalent to a record; - - An enum, containing one of a small set of symbols; - - An array of values, all of the same schema; - - A map containing string/value pairs, each of a declared schema; - - A union of other schemas; - - A fixed sized binary object; - - A unicode string; - - A sequence of bytes; - - A 32-bit signed int; - - A 64-bit signed long; - - A 32-bit floating-point float; - - A 64-bit floating-point double; - - A boolean; - - Null. -""" - -import abc -import json -import logging -import re -import sys -from six import with_metaclass - -PY2 = sys.version_info[0] == 2 - -if PY2: - _str = unicode # pylint: disable=undefined-variable -else: - _str = str - -logger = logging.getLogger(__name__) - -# ------------------------------------------------------------------------------ -# Constants - -# Log level more verbose than DEBUG=10, INFO=20, etc. 
-DEBUG_VERBOSE = 5 - -NULL = 'null' -BOOLEAN = 'boolean' -STRING = 'string' -BYTES = 'bytes' -INT = 'int' -LONG = 'long' -FLOAT = 'float' -DOUBLE = 'double' -FIXED = 'fixed' -ENUM = 'enum' -RECORD = 'record' -ERROR = 'error' -ARRAY = 'array' -MAP = 'map' -UNION = 'union' - -# Request and error unions are part of Avro protocols: -REQUEST = 'request' -ERROR_UNION = 'error_union' - -PRIMITIVE_TYPES = frozenset([ - NULL, - BOOLEAN, - STRING, - BYTES, - INT, - LONG, - FLOAT, - DOUBLE, -]) - -NAMED_TYPES = frozenset([ - FIXED, - ENUM, - RECORD, - ERROR, -]) - -VALID_TYPES = frozenset.union( - PRIMITIVE_TYPES, - NAMED_TYPES, - [ - ARRAY, - MAP, - UNION, - REQUEST, - ERROR_UNION, - ], -) - -SCHEMA_RESERVED_PROPS = frozenset([ - 'type', - 'name', - 'namespace', - 'fields', # Record - 'items', # Array - 'size', # Fixed - 'symbols', # Enum - 'values', # Map - 'doc', -]) - -FIELD_RESERVED_PROPS = frozenset([ - 'default', - 'name', - 'doc', - 'order', - 'type', -]) - -VALID_FIELD_SORT_ORDERS = frozenset([ - 'ascending', - 'descending', - 'ignore', -]) - - -# ------------------------------------------------------------------------------ -# Exceptions - - -class Error(Exception): - """Base class for errors in this module.""" - - -class AvroException(Error): - """Generic Avro schema error.""" - - -class SchemaParseException(AvroException): - """Error while parsing a JSON schema descriptor.""" - - -class Schema(with_metaclass(abc.ABCMeta, object)): - """Abstract base class for all Schema classes.""" - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object. - - Args: - data_type: Type of the schema to initialize. - other_props: Optional dictionary of additional properties. - """ - if data_type not in VALID_TYPES: - raise SchemaParseException('%r is not a valid Avro type.' % data_type) - - # All properties of this schema, as a map: property name -> property value - self._props = {} - - self._props['type'] = data_type - self._type = data_type - - if other_props: - self._props.update(other_props) - - @property - def namespace(self): - """Returns: the namespace this schema belongs to, if any, or None.""" - return self._props.get('namespace', None) - - @property - def type(self): - """Returns: the type of this schema.""" - return self._type - - @property - def doc(self): - """Returns: the documentation associated to this schema, if any, or None.""" - return self._props.get('doc', None) - - @property - def props(self): - """Reports all the properties of this schema. - - Includes all properties, reserved and non reserved. - JSON properties of this schema are directly generated from this dict. - - Returns: - A dictionary of properties associated to this schema. - """ - return self._props - - @property - def other_props(self): - """Returns: the dictionary of non-reserved properties.""" - return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) - - def __str__(self): - """Returns: the JSON representation of this schema.""" - return json.dumps(self.to_json(names=None)) - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - - -# ------------------------------------------------------------------------------ - - -_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') - -_RE_FULL_NAME = re.compile( - r'^' - r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace - r'([A-Za-z_][A-Za-z0-9_]*)' # name - r'$' -) - - -class Name(object): - """Representation of an Avro name.""" - - def __init__(self, name, namespace=None): - """Parses an Avro name. - - Args: - name: Avro name to parse (relative or absolute). - namespace: Optional explicit namespace if the name is relative. - """ - # Normalize: namespace is always defined as a string, possibly empty. - if namespace is None: - namespace = '' - - if '.' in name: - # name is absolute, namespace is ignored: - self._fullname = name - - match = _RE_FULL_NAME.match(self._fullname) - if match is None: - raise SchemaParseException( - 'Invalid absolute schema name: %r.' % self._fullname) - - self._name = match.group(1) - self._namespace = self._fullname[:-(len(self._name) + 1)] - - else: - # name is relative, combine with explicit namespace: - self._name = name - self._namespace = namespace - self._fullname = (self._name - if (not self._namespace) else - '%s.%s' % (self._namespace, self._name)) - - # Validate the fullname: - if _RE_FULL_NAME.match(self._fullname) is None: - raise SchemaParseException( - 'Invalid schema name %r infered from name %r and namespace %r.' - % (self._fullname, self._name, self._namespace)) - - def __eq__(self, other): - if not isinstance(other, Name): - return NotImplemented - return self.fullname == other.fullname - - @property - def simple_name(self): - """Returns: the simple name part of this name.""" - return self._name - - @property - def namespace(self): - """Returns: this name's namespace, possible the empty string.""" - return self._namespace - - @property - def fullname(self): - """Returns: the full name.""" - return self._fullname - - -# ------------------------------------------------------------------------------ - - -class Names(object): - """Tracks Avro named schemas and default namespace during parsing.""" - - def __init__(self, default_namespace=None, names=None): - """Initializes a new name tracker. - - Args: - default_namespace: Optional default namespace. - names: Optional initial mapping of known named schemas. - """ - if names is None: - names = {} - self._names = names - self._default_namespace = default_namespace - - @property - def names(self): - """Returns: the mapping of known named schemas.""" - return self._names - - @property - def default_namespace(self): - """Returns: the default namespace, if any, or None.""" - return self._default_namespace - - def new_with_default_namespace(self, namespace): - """Creates a new name tracker from this tracker, but with a new default ns. - - Args: - namespace: New default namespace to use. - Returns: - New name tracker with the specified default namespace. - """ - return Names(names=self._names, default_namespace=namespace) - - def get_name(self, name, namespace=None): - """Resolves the Avro name according to this name tracker's state. - - Args: - name: Name to resolve (absolute or relative). - namespace: Optional explicit namespace. - Returns: - The specified name, resolved according to this tracker. - """ - if namespace is None: - namespace = self._default_namespace - return Name(name=name, namespace=namespace) - - def get_schema(self, name, namespace=None): - """Resolves an Avro schema by name. 
- - Args: - name: Name (relative or absolute) of the Avro schema to look up. - namespace: Optional explicit namespace. - Returns: - The schema with the specified name, if any, or None. - """ - avro_name = self.get_name(name=name, namespace=namespace) - return self._names.get(avro_name.fullname, None) - - def prune_namespace(self, properties): - """given a properties, return properties with namespace removed if - it matches the own default namespace - """ - if self.default_namespace is None: - # I have no default -- no change - return properties - if 'namespace' not in properties: - # he has no namespace - no change - return properties - if properties['namespace'] != self.default_namespace: - # we're different - leave his stuff alone - return properties - # we each have a namespace and it's redundant. delete his. - prunable = properties.copy() - del prunable['namespace'] - return prunable - - def register(self, schema): - """Registers a new named schema in this tracker. - - Args: - schema: Named Avro schema to register in this tracker. - """ - if schema.fullname in VALID_TYPES: - raise SchemaParseException( - '%s is a reserved type name.' % schema.fullname) - if schema.fullname in self.names: - raise SchemaParseException( - 'Avro name %r already exists.' % schema.fullname) - - logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname) - self._names[schema.fullname] = schema - - -# ------------------------------------------------------------------------------ - - -class NamedSchema(Schema): - """Abstract base class for named schemas. - - Named schemas are enumerated in NAMED_TYPES. - """ - - def __init__( - self, - data_type, - name=None, - namespace=None, - names=None, - other_props=None, - ): - """Initializes a new named schema object. - - Args: - data_type: Type of the named schema. - name: Name (absolute or relative) of the schema. - namespace: Optional explicit namespace if name is relative. - names: Tracker to resolve and register Avro names. - other_props: Optional map of additional properties of the schema. - """ - assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type) - self._avro_name = names.get_name(name=name, namespace=namespace) - - super(NamedSchema, self).__init__(data_type, other_props) - - names.register(self) - - self._props['name'] = self.name - if self.namespace: - self._props['namespace'] = self.namespace - - @property - def avro_name(self): - """Returns: the Name object describing this schema's name.""" - return self._avro_name - - @property - def name(self): - return self._avro_name.simple_name - - @property - def namespace(self): - return self._avro_name.namespace - - @property - def fullname(self): - return self._avro_name.fullname - - def name_ref(self, names): - """Reports this schema name relative to the specified name tracker. - - Args: - names: Avro name tracker to relativise this schema name against. - Returns: - This schema name, relativised against the specified name tracker. - """ - if self.namespace == names.default_namespace: - return self.name - return self.fullname - - @abc.abstractmethod - def to_json(self, names): - """Converts the schema object into its AVRO specification representation. - - Schema types that have names (records, enums, and fixed) must - be aware of not re-defining schemas that are already listed - in the parameter names. 
- """ - raise Exception('Cannot run abstract method.') - -# ------------------------------------------------------------------------------ - - -_NO_DEFAULT = object() - - -class Field(object): - """Representation of the schema of a field in a record.""" - - def __init__( - self, - data_type, - name, - index, - has_default, - default=_NO_DEFAULT, - order=None, - doc=None, - other_props=None - ): - """Initializes a new Field object. - - Args: - data_type: Avro schema of the field. - name: Name of the field. - index: 0-based position of the field. - has_default: - default: - order: - doc: - other_props: - """ - if (not isinstance(name, _str)) or (not name): - raise SchemaParseException('Invalid record field name: %r.' % name) - if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): - raise SchemaParseException('Invalid record field order: %r.' % order) - - # All properties of this record field: - self._props = {} - - self._has_default = has_default - if other_props: - self._props.update(other_props) - - self._index = index - self._type = self._props['type'] = data_type - self._name = self._props['name'] = name - - if has_default: - self._props['default'] = default - - if order is not None: - self._props['order'] = order - - if doc is not None: - self._props['doc'] = doc - - @property - def type(self): - """Returns: the schema of this field.""" - return self._type - - @property - def name(self): - """Returns: this field name.""" - return self._name - - @property - def index(self): - """Returns: the 0-based index of this field in the record.""" - return self._index - - @property - def default(self): - return self._props['default'] - - @property - def has_default(self): - return self._has_default - - @property - def order(self): - return self._props.get('order', None) - - @property - def doc(self): - return self._props.get('doc', None) - - @property - def props(self): - return self._props - - @property - def other_props(self): - return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) - - def __str__(self): - return json.dumps(self.to_json()) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['type'] = self.type.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Primitive Types - - -class PrimitiveSchema(Schema): - """Schema of a primitive Avro type. - - Valid primitive types are defined in PRIMITIVE_TYPES. - """ - - def __init__(self, data_type, other_props=None): - """Initializes a new schema object for the specified primitive type. - - Args: - data_type: Type of the schema to construct. Must be primitive. - """ - if data_type not in PRIMITIVE_TYPES: - raise AvroException('%r is not a valid primitive type.' % data_type) - super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) - - @property - def name(self): - """Returns: the simple name of this schema.""" - # The name of a primitive type is the type itself. - return self.type - - @property - def fullname(self): - """Returns: the fully qualified name of this schema.""" - # The full name is the simple name for primitive schema. 
- return self.name - - def to_json(self, names=None): - if len(self.props) == 1: - return self.fullname - return self.props - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (non-recursive) - - -class FixedSchema(NamedSchema): - def __init__( - self, - name, - namespace, - size, - names=None, - other_props=None, - ): - # Ensure valid ctor args - if not isinstance(size, int): - fail_msg = 'Fixed Schema requires a valid integer for size property.' - raise AvroException(fail_msg) - - super(FixedSchema, self).__init__( - data_type=FIXED, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - self._props['size'] = size - - @property - def size(self): - """Returns: the size of this fixed schema, in bytes.""" - return self._props['size'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ - - -class EnumSchema(NamedSchema): - def __init__( - self, - name, - namespace, - symbols, - names=None, - doc=None, - other_props=None, - ): - """Initializes a new enumeration schema object. - - Args: - name: Simple name of this enumeration. - namespace: Optional namespace. - symbols: Ordered list of symbols defined in this enumeration. - names: - doc: - other_props: - """ - symbols = tuple(symbols) - symbol_set = frozenset(symbols) - if (len(symbol_set) != len(symbols) - or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): - raise AvroException( - 'Invalid symbols for enum schema: %r.' % (symbols,)) - - super(EnumSchema, self).__init__( - data_type=ENUM, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - - self._props['symbols'] = symbols - if doc is not None: - self._props['doc'] = doc - - @property - def symbols(self): - """Returns: the symbols defined in this enum.""" - return self._props['symbols'] - - def to_json(self, names=None): - if names is None: - names = Names() - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - return names.prune_namespace(self.props) - - def __eq__(self, that): - return self.props == that.props - - -# ------------------------------------------------------------------------------ -# Complex Types (recursive) - - -class ArraySchema(Schema): - """Schema of an array.""" - - def __init__(self, items, other_props=None): - """Initializes a new array schema object. - - Args: - items: Avro schema of the array items. 
- other_props: - """ - super(ArraySchema, self).__init__( - data_type=ARRAY, - other_props=other_props, - ) - self._items_schema = items - self._props['items'] = items - - @property - def items(self): - """Returns: the schema of the items in this array.""" - return self._items_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - item_schema = self.items - to_dump['items'] = item_schema.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class MapSchema(Schema): - """Schema of a map.""" - - def __init__(self, values, other_props=None): - """Initializes a new map schema object. - - Args: - values: Avro schema of the map values. - other_props: - """ - super(MapSchema, self).__init__( - data_type=MAP, - other_props=other_props, - ) - self._values_schema = values - self._props['values'] = values - - @property - def values(self): - """Returns: the schema of the values in this map.""" - return self._values_schema - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = self.props.copy() - to_dump['values'] = self.values.to_json(names) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class UnionSchema(Schema): - """Schema of a union.""" - - def __init__(self, schemas): - """Initializes a new union schema object. - - Args: - schemas: Ordered collection of schema branches in the union. - """ - super(UnionSchema, self).__init__(data_type=UNION) - self._schemas = tuple(schemas) - - # Validate the schema branches: - - # All named schema names are unique: - named_branches = tuple( - filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) - unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) - if len(unique_names) != len(named_branches): - raise AvroException( - 'Invalid union branches with duplicate schema name:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - # Types are unique within unnamed schemas, and union is not allowed: - unnamed_branches = tuple( - filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) - unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) - if UNION in unique_types: - raise AvroException( - 'Invalid union branches contain other unions:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - if len(unique_types) != len(unnamed_branches): - raise AvroException( - 'Invalid union branches with duplicate type:%s' - % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) - - @property - def schemas(self): - """Returns: the ordered list of schema branches in the union.""" - return self._schemas - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - to_dump.append(schema.to_json(names)) - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ - - -class ErrorUnionSchema(UnionSchema): - """Schema representing the declared errors of a protocol message.""" - - def __init__(self, schemas): - """Initializes an error-union 
schema. - - Args: - schema: collection of error schema. - """ - # Prepend "string" to handle system errors - schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas) - super(ErrorUnionSchema, self).__init__(schemas=schemas) - - def to_json(self, names=None): - if names is None: - names = Names() - to_dump = [] - for schema in self.schemas: - # Don't print the system error schema - if schema.type == STRING: - continue - to_dump.append(schema.to_json(names)) - return to_dump - - -# ------------------------------------------------------------------------------ - - -class RecordSchema(NamedSchema): - """Schema of a record.""" - - @staticmethod - def _make_field(index, field_desc, names): - """Builds field schemas from a list of field JSON descriptors. - - Args: - index: 0-based index of the field in the record. - field_desc: JSON descriptors of a record field. - Return: - The field schema. - """ - field_schema = schema_from_json_data( - json_data=field_desc['type'], - names=names, - ) - other_props = ( - dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS))) - return Field( - data_type=field_schema, - name=field_desc['name'], - index=index, - has_default=('default' in field_desc), - default=field_desc.get('default', _NO_DEFAULT), - order=field_desc.get('order', None), - doc=field_desc.get('doc', None), - other_props=other_props, - ) - - @staticmethod - def make_field_list(field_desc_list, names): - """Builds field schemas from a list of field JSON descriptors. - - Guarantees field name unicity. - - Args: - field_desc_list: collection of field JSON descriptors. - names: Avro schema tracker. - Yields - Field schemas. - """ - for index, field_desc in enumerate(field_desc_list): - yield RecordSchema._make_field(index, field_desc, names) - - @staticmethod - def _make_field_map(fields): - """Builds the field map. - - Guarantees field name unicity. - - Args: - fields: iterable of field schema. - Returns: - A map of field schemas, indexed by name. - """ - field_map = {} - for field in fields: - if field.name in field_map: - raise SchemaParseException( - 'Duplicate record field name %r.' % field.name) - field_map[field.name] = field - return field_map - - def __init__( - self, - name, - namespace, - fields=None, - make_fields=None, - names=None, - record_type=RECORD, - doc=None, - other_props=None - ): - """Initializes a new record schema object. - - Args: - name: Name of the record (absolute or relative). - namespace: Optional namespace the record belongs to, if name is relative. - fields: collection of fields to add to this record. - Exactly one of fields or make_fields must be specified. - make_fields: function creating the fields that belong to the record. - The function signature is: make_fields(names) -> ordered field list. - Exactly one of fields or make_fields must be specified. - names: - record_type: Type of the record: one of RECORD, ERROR or REQUEST. - Protocol requests are not named. - doc: - other_props: - """ - if record_type == REQUEST: - # Protocol requests are not named: - super(RecordSchema, self).__init__( - data_type=REQUEST, - other_props=other_props, - ) - elif record_type in [RECORD, ERROR]: - # Register this record name in the tracker: - super(RecordSchema, self).__init__( - data_type=record_type, - name=name, - namespace=namespace, - names=names, - other_props=other_props, - ) - else: - raise SchemaParseException( - 'Invalid record type: %r.' 
% record_type) - - if record_type in [RECORD, ERROR]: - avro_name = names.get_name(name=name, namespace=namespace) - nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) - elif record_type == REQUEST: - # Protocol request has no name: no need to change default namespace: - nested_names = names - - if fields is None: - fields = make_fields(names=nested_names) - else: - assert make_fields is None - self._fields = tuple(fields) - - self._field_map = RecordSchema._make_field_map(self._fields) - - self._props['fields'] = fields - if doc is not None: - self._props['doc'] = doc - - @property - def fields(self): - """Returns: the field schemas, as an ordered tuple.""" - return self._fields - - @property - def field_map(self): - """Returns: a read-only map of the field schemas index by field names.""" - return self._field_map - - def to_json(self, names=None): - if names is None: - names = Names() - # Request records don't have names - if self.type == REQUEST: - return [f.to_json(names) for f in self.fields] - - if self.fullname in names.names: - return self.name_ref(names) - names.names[self.fullname] = self - - to_dump = names.prune_namespace(self.props.copy()) - to_dump['fields'] = [f.to_json(names) for f in self.fields] - return to_dump - - def __eq__(self, that): - to_cmp = json.loads(_str(self)) - return to_cmp == json.loads(_str(that)) - - -# ------------------------------------------------------------------------------ -# Module functions - - -def filter_keys_out(items, keys): - """Filters a collection of (key, value) items. - - Exclude any item whose key belongs to keys. - - Args: - items: Dictionary of items to filter the keys out of. - keys: Keys to filter out. - Yields: - Filtered items. - """ - for key, value in items.items(): - if key in keys: - continue - yield key, value - - -# ------------------------------------------------------------------------------ - - -def _schema_from_json_string(json_string, names): - if json_string in PRIMITIVE_TYPES: - return PrimitiveSchema(data_type=json_string) - - # Look for a known named schema: - schema = names.get_schema(name=json_string) - if schema is None: - raise SchemaParseException( - 'Unknown named schema %r, known names: %r.' 
- % (json_string, sorted(names.names))) - return schema - - -def _schema_from_json_array(json_array, names): - def MakeSchema(desc): - return schema_from_json_data(json_data=desc, names=names) - - return UnionSchema(map(MakeSchema, json_array)) - - -def _schema_from_json_object(json_object, names): - data_type = json_object.get('type') - if data_type is None: - raise SchemaParseException( - 'Avro schema JSON descriptor has no "type" property: %r' % json_object) - - other_props = dict( - filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) - - if data_type in PRIMITIVE_TYPES: - # FIXME should not ignore other properties - result = PrimitiveSchema(data_type, other_props=other_props) - - elif data_type in NAMED_TYPES: - name = json_object.get('name') - namespace = json_object.get('namespace', names.default_namespace) - if data_type == FIXED: - size = json_object.get('size') - result = FixedSchema(name, namespace, size, names, other_props) - elif data_type == ENUM: - symbols = json_object.get('symbols') - doc = json_object.get('doc') - result = EnumSchema(name, namespace, symbols, names, doc, other_props) - - elif data_type in [RECORD, ERROR]: - field_desc_list = json_object.get('fields', ()) - - def MakeFields(names): - return tuple(RecordSchema.make_field_list(field_desc_list, names)) - - result = RecordSchema( - name=name, - namespace=namespace, - make_fields=MakeFields, - names=names, - record_type=data_type, - doc=json_object.get('doc'), - other_props=other_props, - ) - else: - raise Exception('Internal error: unknown type %r.' % data_type) - - elif data_type in VALID_TYPES: - # Unnamed, non-primitive Avro type: - - if data_type == ARRAY: - items_desc = json_object.get('items') - if items_desc is None: - raise SchemaParseException( - 'Invalid array schema descriptor with no "items" : %r.' - % json_object) - result = ArraySchema( - items=schema_from_json_data(items_desc, names), - other_props=other_props, - ) - - elif data_type == MAP: - values_desc = json_object.get('values') - if values_desc is None: - raise SchemaParseException( - 'Invalid map schema descriptor with no "values" : %r.' - % json_object) - result = MapSchema( - values=schema_from_json_data(values_desc, names=names), - other_props=other_props, - ) - - elif data_type == ERROR_UNION: - error_desc_list = json_object.get('declared_errors') - assert error_desc_list is not None - error_schemas = map( - lambda desc: schema_from_json_data(desc, names=names), - error_desc_list) - result = ErrorUnionSchema(schemas=error_schemas) - - else: - raise Exception('Internal error: unknown type %r.' % data_type) - else: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r' % json_object) - return result - - -# Parsers for the JSON data types: -_JSONDataParserTypeMap = { - _str: _schema_from_json_string, - list: _schema_from_json_array, - dict: _schema_from_json_object, -} - - -def schema_from_json_data(json_data, names=None): - """Builds an Avro Schema from its JSON descriptor. - - Args: - json_data: JSON data representing the descriptor of the Avro schema. - names: Optional tracker for Avro named schemas. - Returns: - The Avro schema parsed from the JSON descriptor. - Raises: - SchemaParseException: if the descriptor is invalid. - """ - if names is None: - names = Names() - - # Select the appropriate parser based on the JSON data type: - parser = _JSONDataParserTypeMap.get(type(json_data)) - if parser is None: - raise SchemaParseException( - 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) - return parser(json_data, names=names) - - -# ------------------------------------------------------------------------------ - - -def parse(json_string): - """Constructs a Schema from its JSON descriptor in text form. - - Args: - json_string: String representation of the JSON descriptor of the schema. - Returns: - The parsed schema. - Raises: - SchemaParseException: on JSON parsing error, - or if the JSON descriptor is invalid. - """ - try: - json_data = json.loads(json_string) - except Exception as exn: - raise SchemaParseException( - 'Error parsing schema from JSON: %r. ' - 'Error message: %r.' - % (json_string, exn)) - - # Initialize the names object - names = Names() - - # construct the Avro Schema object - return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/base_client.py deleted file mode 100644 index 9784a27..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/base_client.py +++ /dev/null @@ -1,463 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - AzureSasCredentialPolicy, - ContentDecodePolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - RedirectPolicy, - ProxyPolicy, - UserAgentPolicy, -) - -from .constants import CONNECTION_TIMEOUT, READ_TIMEOUT, SERVICE_HOST_BASE -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - ExponentialRetry, - StorageBearerTokenCredentialPolicy, - StorageContentValidation, - StorageHeadersPolicy, - StorageHosts, - StorageLoggingPolicy, - StorageRequestHook, - StorageResponseHook, - QueueMessagePolicy, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - elif sas_token: - query_str += sas_token - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = StorageBearerTokenCredentialPolicy(credential) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
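The batch helper above is what the public batch operations (for example delete_blobs on a container client) funnel into; each positional request becomes one part of the multipart body and, with raise_on_any_failure left at its default, a partial failure surfaces as PartialBatchErrorException. A sketch under those assumptions (container_client and the import path are placeholders):

    from azure.multiapi.storagev2.blob.v2021_08_06 import PartialBatchErrorException  # illustrative path

    try:
        container_client.delete_blobs("logs/2021-01.txt", "logs/2021-02.txt")
    except PartialBatchErrorException as exc:
        for part in exc.parts:           # individual sub-responses, including the failed ones
            print(part.status_code)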
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - if service == "dfs": - primary = primary.replace(".blob.", ".dfs.") - secondary = secondary.replace(".blob.", ".dfs.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - 
config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/base_client_async.py deleted file mode 100644 index d8c5f43..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/base_client_async.py +++ /dev/null @@ -1,191 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
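The connection-string and SAS helpers above (parse_connection_str, parse_query, is_credential_sastoken) are plain module functions, so their behaviour is easy to see in isolation. A short sketch with placeholder values:

    conn_str = (
        "DefaultEndpointsProtocol=https;AccountName=myaccount;"
        "AccountKey=<base64 key>;EndpointSuffix=core.windows.net"
    )
    primary, secondary, credential = parse_connection_str(conn_str, credential=None, service="blob")
    # primary    -> "https://myaccount.blob.core.windows.net"
    # secondary  -> "myaccount-secondary.blob.core.windows.net"
    # credential -> {"account_name": "myaccount", "account_key": "<base64 key>"}

    snapshot, sas_token = parse_query("sv=2020-10-02&ss=b&sig=redacted")
    # snapshot is None here; sas_token keeps only the recognised SAS parameters, re-encoded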
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - AsyncRedirectPolicy, - AzureSasCredentialPolicy, - ContentDecodePolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageHeadersPolicy, - StorageHosts, - StorageRequestHook, - QueueMessagePolicy -) -from .policies_async import AsyncStorageBearerTokenCredentialPolicy, AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncStorageBearerTokenCredentialPolicy(credential) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/constants.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/constants.py deleted file mode 100644 index 8a39d93..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/constants.py +++ /dev/null @@ -1,28 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
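The async variant above rejects a plain with statement (its __enter__ raises TypeError), so clients built on it are driven through async with instead. A minimal sketch, assuming the vendored aio client (import path illustrative):

    import asyncio
    from azure.multiapi.storagev2.blob.v2021_08_06.aio import BlobServiceClient  # illustrative path

    async def main():
        async with BlobServiceClient("https://myaccount.blob.core.windows.net", credential="<account key>") as client:
            props = await client.get_service_properties()
            print(props)

    asyncio.run(main())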
-# -------------------------------------------------------------------------- - -import sys -from .._generated import AzureBlobStorage - - -X_MS_VERSION = AzureBlobStorage(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds - # the 80000 seconds was calculated with: - # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 80000 - -DEFAULT_OAUTH_SCOPE = "/.default" -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/encryption.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. 
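The connection and read timeout constants above are only applied with setdefault when the pipeline is built, so a caller can override them per client. A sketch, reusing the illustrative client from the earlier examples:

    client = BlobServiceClient(
        "https://myaccount.blob.core.windows.net",
        credential="<account key>",
        connection_timeout=10,   # socket connect timeout in seconds (default 20)
        read_timeout=120,        # read timeout in seconds (default 80000 on Python 3.5+)
    )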
- ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/models.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/models.py deleted file mode 100644 index 22e7b75..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/models.py +++ /dev/null @@ -1,480 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
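The encryption docstrings above repeatedly spell out the client-side key-encryption-key contract: wrap_key(key), unwrap_key(key, algorithm), get_kid() and get_key_wrap_algorithm(). A minimal local implementation is sketched below using AES key wrap from the cryptography package; the direct calls to encrypt_queue_message/decrypt_queue_message assume this module's helpers are importable, whereas in normal use the object is simply assigned to a client's key_encryption_key attribute:

    import os
    from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap

    class LocalKeyWrapper:
        """Toy key-encryption-key satisfying the interface described above."""

        def __init__(self, kid="local-kek-1"):
            self._kid = kid
            self._wrapping_key = os.urandom(32)   # AES-256 wrapping key held by the caller

        def wrap_key(self, key):
            return aes_key_wrap(self._wrapping_key, key)

        def unwrap_key(self, key, algorithm):
            return aes_key_unwrap(self._wrapping_key, key)

        def get_key_wrap_algorithm(self):
            return "A256KW"

        def get_kid(self):
            return self._kid

    kek = LocalKeyWrapper()
    envelope = encrypt_queue_message("secret payload", kek)         # JSON envelope with EncryptedMessageContents
    plain = decrypt_queue_message(envelope, None, True, kek, None)  # -> "secret payload"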
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
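StorageErrorCode above is a str-valued enum, so it compares directly against the error code string that the shared response handlers attach to raised storage errors. A short sketch (container_client is a placeholder client):

    from azure.core.exceptions import HttpResponseError

    try:
        container_client.get_container_properties()
    except HttpResponseError as exc:
        if exc.error_code == StorageErrorCode.container_not_found:
            print("container does not exist")
        else:
            raise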
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. - :keyword bool permanent_delete: - To enable permanent delete on the blob is permitted. - Valid for Object resource type of Blob only. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.permanent_delete = kwargs.pop('permanent_delete', False) - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('y' if self.permanent_delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') + - ('i' if self.set_immutability_policy else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.blob.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_permanent_delete = 'y' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - p_set_immutability_policy = 'i' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy, - permanent_delete=p_permanent_delete) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. 
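The from_string docstrings above describe a single-letter encoding for each flag, which is easiest to see side by side with the keyword form; the commented call shows where the objects are typically consumed (generate_account_sas arguments are illustrative):

    rtypes = ResourceTypes.from_string("sco")            # service + container + object
    perms = AccountSasPermissions.from_string("rwdlc")   # read, write, delete, list, create
    services = Services(blob=True, queue=True)           # str(services) == "bq"

    # sas = generate_account_sas(account_name, account_key,
    #                            resource_types=rtypes, permission=perms,
    #                            expiry=datetime.utcnow() + timedelta(hours=1))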
- :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/parser.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/policies.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/policies.py deleted file mode 100644 index 21c689d..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/policies.py +++ /dev/null @@ -1,657 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - BearerTokenCredentialPolicy, - HeadersPolicy, - HTTPPolicy, - NetworkTraceLoggingPolicy, - RequestHistory, - SansIOHTTPPolicy, -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .authentication import StorageHttpChallenge -from .constants import DEFAULT_OAUTH_SCOPE, STORAGE_OAUTH_SCOPE -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.credentials import TokenCredential - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): # pylint: disable=too-many-return-statements - """Is this method/status code retryable? (Based on allowlists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - # retry if invalid content md5 - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = response.http_request.headers.get('content-md5', None) or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - def __init__(self, logging_enable=False, **kwargs): - self.logging_body = kwargs.pop("logging_body", False) - super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs) - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - self.logging_body = self.logging_body or options.pop("logging_body", False) - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - if self.logging_body: - _LOGGER.debug(str(http_request.body)) - else: - # We don't want to log the binary data of a file upload. - _LOGGER.debug("Hidden body, please use logging_body to show body") - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
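# [Editor's sketch, not part of the original module] The SAS-scrubbing step
# applied to the x-ms-copy-source header in on_request above, shown standalone:
# the 'sig' query parameter is masked before the value is written to the DEBUG log.
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

def scrub_sas_signature(url):
    scheme, netloc, path, params, query, fragment = urlparse(url)
    query_pairs = dict(parse_qsl(query))
    if 'sig' in query_pairs:
        query_pairs['sig'] = '*****'
    return urlunparse((scheme, netloc, path, params, urlencode(query_pairs), fragment))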
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - resp_content_type = response.http_response.headers.get("content-type", "") - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif resp_content_type.endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif resp_content_type.startswith("image"): - _LOGGER.debug("Body contains image data.") - - if self.logging_body and resp_content_type.startswith("text"): - _LOGGER.debug(response.http_response.text()) - elif self.logging_body: - try: - _LOGGER.debug(response.http_response.body()) - except ValueError: - _LOGGER.debug("Body is streamable") - - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - # Values could be 0 - data_stream_total = request.context.get('data_stream_total') - if data_stream_total is None: - data_stream_total = request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') - if download_stream_current is None: - download_stream_current = request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') - if upload_stream_current is None: - upload_stream_current = request.context.options.pop('upload_stream_current', None) - - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - - will_retry = is_retry(response, request.context.options.get('mode')) - # Auth error could come from Bearer challenge, in which case this request will be made again - is_auth_error = response.http_response.status_code == 401 - should_update_counts = not (will_retry or is_auth_error) - - if should_update_counts and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif should_update_counts and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = 
upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. - """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
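# [Editor's note, added for readability] Counter semantics in increment(),
# summarized: every failure decrements 'total' plus exactly one of 'connect'
# (the request likely never reached the server), 'read' (the server may have
# started processing), or 'status' (an HTTP status that is_retry() flagged).
# is_exhausted() stops retrying once any configured counter drops below zero.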
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the allowlist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. 
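[Editor's sketch, not part of the original docstring] The backoff computed by this
method, shown standalone with the default parameters. The centre value is
initial_backoff when count is 0 and initial_backoff + increment_base**count
otherwise (15, 18, 24, 42, ... seconds with the defaults), and each value is
jittered by up to +/- random_jitter_range seconds:

    import random

    def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        start = backoff - jitter if backoff > jitter else 0
        return random.Random().uniform(start, backoff + jitter)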
- - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy): - """ Custom Bearer token credential policy for following Storage Bearer challenges """ - - def __init__(self, credential, **kwargs): - # type: (TokenCredential, **Any) -> None - super(StorageBearerTokenCredentialPolicy, self).__init__(credential, STORAGE_OAUTH_SCOPE, **kwargs) - - def on_challenge(self, request, response): - # type: (PipelineRequest, PipelineResponse) -> bool - try: - auth_header = response.http_response.headers.get("WWW-Authenticate") - challenge = StorageHttpChallenge(auth_header) - except ValueError: - return False - - scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE - self.authorize_request(request, scope, tenant_id=challenge.tenant_id) - - return True diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/policies_async.py deleted file mode 100644 index b0eae9f..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/policies_async.py +++ /dev/null @@ -1,253 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy, AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .authentication import StorageHttpChallenge -from .constants import DEFAULT_OAUTH_SCOPE, STORAGE_OAUTH_SCOPE -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.credentials_async import AsyncTokenCredential - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - # Values could be 0 - data_stream_total = request.context.get('data_stream_total') - if data_stream_total is None: - data_stream_total = request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') - if download_stream_current is None: - download_stream_current = request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') - if upload_stream_current is None: - upload_stream_current = request.context.options.pop('upload_stream_current', None) - - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - # Auth error could come from Bearer challenge, in which case this request will be made again - is_auth_error = response.http_response.status_code == 401 - should_update_counts = not (will_retry or is_auth_error) - - if should_update_counts and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif should_update_counts and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = 
response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class AsyncStorageBearerTokenCredentialPolicy(AsyncBearerTokenCredentialPolicy): - """ Custom Bearer token credential policy for following Storage Bearer challenges """ - - def __init__(self, credential, **kwargs): - # type: (AsyncTokenCredential, **Any) -> None - super(AsyncStorageBearerTokenCredentialPolicy, self).__init__(credential, STORAGE_OAUTH_SCOPE, **kwargs) - - async def on_challenge(self, request, response): - # type: (PipelineRequest, PipelineResponse) -> bool - try: - auth_header = response.http_response.headers.get("WWW-Authenticate") - challenge = StorageHttpChallenge(auth_header) - except ValueError: - return False - - scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE - await self.authorize_request(request, scope, tenant_id=challenge.tenant_id) - - return True diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/request_handlers.py deleted file mode 100644 index ba76043..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/request_handlers.py +++ /dev/null @@ -1,278 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -import stat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. - """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - mode = fstat(fileno).st_mode - if stat.S_ISREG(mode) or stat.S_ISLNK(mode): - #st_size only meaningful if regular file or symlink, other types - # e.g. sockets may return misleading sizes like 0 - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. 
- try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, OSError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. 
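[Editor's note, not part of the original docstring] The layout sketched above lost
its placeholders in extraction; with them restored, and assuming a hypothetical
batch id "b1", the serialized body produced by this function looks like:

    --batch_b1<CRLF>
    <serialized sub-request><CRLF>
    --batch_b1<CRLF>
    <serialized sub-request><CRLF>   (repeated as needed)
    --batch_b1--<CRLF>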
- """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
<header key>: <header value>
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/response_handlers.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/response_handlers.py deleted file mode 100644 index 4d90a17..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/response_handlers.py +++ /dev/null @@ -1,195 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. 
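# [Editor's note, added for readability] Summary of the error-code -> exception
# mapping applied further down in this function:
#   condition_not_met, blob_overwritten                    -> ResourceModifiedError
#   invalid_authentication_info, authentication_failed     -> ClientAuthenticationError
#   *_not_found, cannot_verify_copy_source                 -> ResourceNotFoundError
#   *_already_exists, *_being_created, *_being_deleted,
#   resource_type_mismatch                                  -> ResourceExistsError
#   anything else                                           -> HttpResponseError (default)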
- if isinstance(storage_error, (PartialBatchErrorException, - ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - try: - error_body = error_body or storage_error.response.reason - except AttributeError: - error_body = '' - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error - if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise 
error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/shared_access_signature.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/shared_access_signature.py deleted file mode 100644 index d2ebfc4..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/shared_access_signature.py +++ /dev/null @@ -1,230 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for blob only - SIGNED_ENCRYPTION_SCOPE = 'ses' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - 
QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for blob only - QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None, **kwargs): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
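[Editor's sketch, not part of the original docstring] Typical usage of this method;
the account name, key and permission/resource strings below are placeholders, not
values taken from the original file:

    from datetime import datetime, timedelta

    sas = SharedAccessSignature("myaccount", "<account key>")
    token = sas.generate_account(
        services="b",                 # blob service only
        resource_types="co",          # container- and object-level APIs
        permission="rl",              # read + list
        expiry=datetime.utcnow() + timedelta(hours=1),
        protocol="https")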
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_encryption_scope(**kwargs) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_encryption_scope(self, **kwargs): - self._add_query(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, kwargs.pop('encryption_scope', None)) - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/uploads.py deleted file mode 100644 index 941a90f..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/uploads.py +++ /dev/null @@ -1,603 +0,0 @@ -# ------------------------------------------------------------------------- -# 
Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - 
upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. 
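For reference, a minimal self-contained sketch (not part of the deleted module) of the bounded-concurrency pattern that _parallel_uploads and upload_data_chunks above implement: at most max_concurrency chunk uploads are in flight, and a new one is submitted whenever one completes. fake_upload and the sample chunks are illustrative placeholders::

    from concurrent import futures
    from itertools import islice

    def fake_upload(chunk):
        offset, data = chunk
        # Stand-in for the real per-chunk service call; returns (offset, range id).
        return offset, "bytes={0}-{1}".format(offset, offset + len(data) - 1)

    def upload_all(chunks, max_concurrency=4):
        pending = iter(chunks)
        results = []
        with futures.ThreadPoolExecutor(max_concurrency) as executor:
            # Prime the window with at most max_concurrency in-flight uploads.
            running = {executor.submit(fake_upload, c) for c in islice(pending, max_concurrency)}
            while running:
                done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
                results.extend(f.result() for f in done)
                # Top the window back up, one new submission per completed upload.
                for _ in done:
                    try:
                        running.add(executor.submit(fake_upload, next(pending)))
                    except StopIteration:
                        break
        # Order by offset, mirroring the sorted(range_ids, key=...) step in upload_data_chunks.
        return [r for _, r in sorted(results)]

    chunks = [(i * 4, b"data") for i in range(10)]
    print(upload_all(chunks))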
- if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): 
- try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be 
corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. - if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - # This means count < size and what's leftover will be returned in this call. 
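The leftover-buffer behaviour of IterStreamer.read above can be seen in isolation with a small stand-alone sketch (simplified to Python 3 and bytes only); it is an illustration, not the class itself::

    class GeneratorReader(object):
        """Minimal stand-in showing how read(size) keeps excess bytes for the next call."""

        def __init__(self, generator, encoding="UTF-8"):
            self.iterator = iter(generator)
            self.leftover = b""
            self.encoding = encoding

        def read(self, size):
            data = self.leftover
            count = len(data)
            try:
                # Pull chunks until at least `size` bytes are available.
                while count < size:
                    chunk = next(self.iterator)
                    if isinstance(chunk, str):
                        chunk = chunk.encode(self.encoding)
                    data += chunk
                    count += len(chunk)
            except StopIteration:
                self.leftover = b""
            if count >= size:
                self.leftover = data[size:]   # keep the surplus for the next read
            return data[:size]

    reader = GeneratorReader(iter([b"hello ", b"world", b"!"]))
    print(reader.read(4), reader.read(4), reader.read(4), reader.read(4))
    # b'hell' b'o wo' b'rld!' b''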
- except StopIteration: - self.leftover = b"" - - if count >= size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in 
kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. 
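The async _parallel_uploads above follows the same bounded-concurrency idea as the thread-pool version, expressed with asyncio primitives. A minimal sketch under the same assumptions; fake_upload and the sample chunks are placeholders::

    import asyncio
    from itertools import islice

    async def fake_upload(chunk):
        offset, data = chunk
        await asyncio.sleep(0)  # stand-in for the real network call
        return offset, "bytes={0}-{1}".format(offset, offset + len(data) - 1)

    async def upload_all(chunks, max_concurrency=4):
        pending = iter(chunks)
        # Prime the window with at most max_concurrency running tasks.
        running = {asyncio.ensure_future(fake_upload(c)) for c in islice(pending, max_concurrency)}
        results = []
        while running:
            done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
            results.extend(task.result() for task in done)
            for _ in done:
                try:
                    running.add(asyncio.ensure_future(fake_upload(next(pending))))
                except StopIteration:
                    break
        return [r for _, r in sorted(results)]

    print(asyncio.run(upload_all([(i * 4, b"data") for i in range(10)])))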
- if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = 
self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2021_04_10/_shared_access_signature.py deleted file mode 100644 index e3f1b24..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_shared_access_signature.py +++ /dev/null @@ -1,609 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from ._shared import sign_string, url_quote -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services, UserDelegationKey -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ - QueryStringConstants - -if TYPE_CHECKING: - from datetime import datetime - from ..blob import ( - ResourceTypes, - AccountSasPermissions, - ContainerSasPermissions, - BlobSasPermissions - ) - - -class BlobQueryStringConstants(object): - SIGNED_TIMESTAMP = 'snapshot' - - -class BlobSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating blob and container access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key=None, user_delegation_key=None): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: - Instead of an account key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling get_user_delegation_key on any Blob service object. 
- ''' - super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - self.user_delegation_key = user_delegation_key - - def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, - expiry=None, start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the blob or one of its snapshots. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param str blob_name: - Name of blob. - :param str snapshot: - The snapshot parameter is an opaque DateTime value that, - when present, specifies the blob snapshot to grant permission. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdxytmei. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or BlobSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - resource_path = container_name + '/' + blob_name - - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - - resource = 'bs' if snapshot else 'b' - resource = 'bv' if version_id else resource - resource = 'd' if kwargs.pop("is_directory", None) else resource - sas.add_resource(resource) - - sas.add_timestamp(snapshot or version_id) - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_encryption_scope(**kwargs) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, resource_path, - user_delegation_key=self.user_delegation_key) - - return sas.get_token() - - def generate_container(self, container_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None, **kwargs): - ''' - Generates a shared access signature for the container. - Use the returned signature with the sas_token parameter of any BlobService. - - :param str container_name: - Name of container. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdxyltfmei. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_blob_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. 
- :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _BlobSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('c') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_encryption_scope(**kwargs) - sas.add_info_for_hns_account(**kwargs) - sas.add_resource_signature(self.account_name, self.account_key, container_name, - user_delegation_key=self.user_delegation_key) - return sas.get_token() - - -class _BlobSharedAccessHelper(_SharedAccessHelper): - - def add_timestamp(self, timestamp): - self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) - - def add_info_for_hns_account(self, **kwargs): - self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) - self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) - self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) - - def get_value_to_append(self, query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): - # pylint: disable = no-member - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/blob/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
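Downstream callers normally reach this helper through the module-level generate_*_sas functions defined later in this file. A hedged usage sketch follows; it assumes the same public surface as the upstream azure-storage-blob package (which the vendored versions mirror), and the account, container, blob and key values are placeholders::

    from datetime import datetime, timedelta

    from azure.storage.blob import BlobSasPermissions, generate_blob_sas

    # Sign with the account key here; a UserDelegationKey obtained from
    # BlobServiceClient.get_user_delegation_key() could be passed via
    # user_delegation_key= instead. All values below are placeholders.
    sas_token = generate_blob_sas(
        account_name="myaccount",
        container_name="mycontainer",
        blob_name="report.csv",
        account_key="<account-key>",
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    blob_url = "https://myaccount.blob.core.windows.net/mycontainer/report.csv?" + sas_token
    print(blob_url)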
- string_to_sign = \ - (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource) - - if user_delegation_key is not None: - self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) - self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) - self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) - self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) - self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) - self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_TID) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + - self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + - self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) - else: - string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) - - string_to_sign += \ - (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + - self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + - self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + - self.get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE) + - self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key if user_delegation_key is None else user_delegation_key.value, - string_to_sign)) - - def get_token(self): - # a conscious decision was made to exclude the timestamp in the generated token - # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp - exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] - return '&'.join(['{0}={1}'.format(n, url_quote(v)) - for n, v in self.query_dict.items() if v is not None and n not in exclude]) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the blob service. 
- - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.blob.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str encryption_scope: - Specifies the encryption scope for a request made so that all write operations will be service encrypted. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication.py - :start-after: [START create_sas_token] - :end-before: [END create_sas_token] - :language: python - :dedent: 8 - :caption: Generating a shared access signature. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(blob=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_container_sas( - account_name, # type: str - container_name, # type: str - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[ContainerSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a container. 
- - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdxyltfmei. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.ContainerSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. 
- :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str encryption_scope: - Specifies the encryption scope for a request made so that all write operations will be service encrypted. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 12 - :caption: Generating a sas token. - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - if isinstance(account_key, UserDelegationKey): - user_delegation_key = account_key - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_container( - container_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_blob_sas( - account_name, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[str] - account_key=None, # type: Optional[str] - user_delegation_key=None, # type: Optional[UserDelegationKey] - permission=None, # type: Optional[Union[BlobSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Any - """Generates a shared access signature for a blob. - - Use the returned signature with the credential parameter of any BlobServiceClient, - ContainerClient or BlobClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str container_name: - The name of the container. - :param str blob_name: - The name of the blob. - :param str snapshot: - An optional blob snapshot ID. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - Either `account_key` or `user_delegation_key` must be specified. - :param ~azure.storage.blob.UserDelegationKey user_delegation_key: - Instead of an account shared key, the user could pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdxytmei. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.blob.BlobSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. 
If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str version_id: - An optional blob version ID. This parameter is only for versioning enabled account - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str encryption_scope: - Specifies the encryption scope for a request made so that all write operations will be service encrypted. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - if not user_delegation_key and not account_key: - raise ValueError("Either user_delegation_key or account_key must be provided.") - if isinstance(account_key, UserDelegationKey): - user_delegation_key = account_key - version_id = kwargs.pop('version_id', None) - if version_id and snapshot: - raise ValueError("snapshot and version_id cannot be set at the same time.") - if user_delegation_key: - sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) - else: - sas = BlobSharedAccessSignature(account_name, account_key=account_key) - return sas.generate_blob( - container_name, - blob_name, - snapshot=snapshot, - version_id=version_id, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2021_04_10/_upload_helpers.py deleted file mode 100644 index 30d5bfa..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_upload_helpers.py +++ /dev/null @@ -1,306 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceExistsError, ResourceModifiedError, HttpResponseError - -from ._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from ._shared.models import StorageErrorCode -from ._shared.uploads import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from ._shared.encryption import generate_blob_encryption_data, encrypt_blob -from ._generated.models import ( - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -def _convert_mod_error(error): - message = error.message.replace( - "The condition specified using HTTP conditional header(s) is not met.", - "The specified blob already exists.") - message = message.replace("ConditionNotMet", "BlobAlreadyExists") - overwrite_error = ResourceExistsError( - message=message, - response=error.response, - error=error) - overwrite_error.error_code = StorageErrorCode.blob_already_exists - raise overwrite_error - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - immutability_policy = kwargs.pop('immutability_policy', None) - immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time - immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode - legal_hold = kwargs.pop('legal_hold', None) - - # Do single put if the size is smaller than or equal config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return client.upload( - body=data, - 
content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - immutability_policy_expiry=immutability_policy_expiry, - immutability_policy_mode=immutability_policy_mode, - legal_hold=legal_hold, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs - ) - else: - block_ids = upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - headers=headers, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - immutability_policy_expiry=immutability_policy_expiry, - immutability_policy_mode=immutability_policy_mode, - legal_hold=legal_hold, - **kwargs) - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs) - - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/_version.py b/azure/multiapi/storagev2/blob/v2021_04_10/_version.py deleted file mode 100644 index b08ef47..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/_version.py +++ /dev/null @@ -1,7 
+0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.11.0" diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/__init__.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/__init__.py deleted file mode 100644 index cfd991e..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/__init__.py +++ /dev/null @@ -1,143 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os - -from ._list_blobs_helper import BlobPrefix -from .._models import BlobType -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._blob_client_async import BlobClient -from ._container_client_async import ContainerClient -from ._blob_service_client_async import BlobServiceClient -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -async def upload_blob_to_url( - blob_url, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - credential=None, # type: Any - **kwargs): - # type: (...) -> dict[str, Any] - """Upload data to a given URL - - The data will be uploaded as a block blob. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param data: - The data to upload. This can be bytes, text, an iterable or a file-like object. - :type data: bytes or str or Iterable - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob_to_url will overwrite any existing data. If set to False, the - operation will fail with a ResourceExistsError. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword dict(str,str) metadata: - Name-value pairs associated with the blob as metadata. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
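(Editor's illustrative sketch, not from the package's samples folder: uploading a small payload with this convenience helper. The SAS URL is a placeholder and the import assumes the upstream ``azure.storage.blob.aio`` namespace.)

.. code-block:: python

    import asyncio
    from azure.storage.blob.aio import upload_blob_to_url

    async def main():
        # Placeholder URL that already carries a SAS token with write permission.
        sas_url = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas-token>"
        result = await upload_blob_to_url(sas_url, b"hello, world", overwrite=True)
        print(result.get("etag"))  # blob-updated property dict (etag, last_modified)

    asyncio.run(main())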
- :keyword str encoding: - Encoding to use if text is supplied as input. Defaults to UTF-8. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict(str, Any) - """ - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) - - -async def _download_to_stream(client, handle, **kwargs): - """Download data to specified open file-handle.""" - stream = await client.download_blob(**kwargs) - await stream.readinto(handle) - - -async def download_blob_from_url( - blob_url, # type: str - output, # type: str - credential=None, # type: Any - **kwargs): - # type: (...) -> None - """Download the contents of a blob to a local file or stream. - - :param str blob_url: - The full URI to the blob. This can also include a SAS token. - :param output: - Where the data should be downloaded to. This could be either a file path to write to, - or an open IO handle to write to. - :type output: str or writable stream - :param credential: - The credentials with which to authenticate. This is optional if the - blob URL already has a SAS token or the blob is public. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword bool overwrite: - Whether the local file should be overwritten if it already exists. The default value is - `False` - in which case a ValueError will be raised if the file already exists. If set to - `True`, an attempt will be made to write to the existing file. If a stream handle is passed - in, this value is ignored. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :keyword int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
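(Editor's illustrative sketch with the same placeholder conventions as above: downloading a blob straight to a local file with this helper.)

.. code-block:: python

    import asyncio
    from azure.storage.blob.aio import download_blob_from_url

    async def main():
        # Placeholder URL carrying a SAS token with read permission.
        sas_url = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas-token>"
        # Writes to a local path; raises ValueError if the file exists unless overwrite=True.
        await download_blob_from_url(sas_url, "hello_copy.txt", overwrite=True)

    asyncio.run(main())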
- :rtype: None - """ - overwrite = kwargs.pop('overwrite', False) - async with BlobClient.from_blob_url(blob_url, credential=credential) as client: - if hasattr(output, 'write'): - await _download_to_stream(client, output, **kwargs) - else: - if not overwrite and os.path.isfile(output): - raise ValueError("The file '{}' already exists.".format(output)) - with open(output, 'wb') as file_handle: - await _download_to_stream(client, file_handle, **kwargs) - - -__all__ = [ - 'upload_blob_to_url', - 'download_blob_from_url', - 'BlobServiceClient', - 'BlobPrefix', - 'ContainerClient', - 'BlobClient', - 'BlobLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/_blob_client_async.py deleted file mode 100644 index 203c31f..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_blob_client_async.py +++ /dev/null @@ -1,2629 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method -from functools import partial -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TYPE_CHECKING -) - -from azure.core.pipeline import AsyncPipeline - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import get_page_ranges_result, parse_tags, deserialize_pipeline_response_into_cls -from .._serialize import get_modify_conditions, get_api_version, get_access_conditions -from .._generated.aio import AzureBlobStorage -from .._generated.models import CpkInfo -from .._deserialize import deserialize_blob_properties -from .._blob_client import BlobClient as BlobClientBase -from ._upload_helpers import ( - upload_block_blob, - upload_append_blob, - upload_page_blob) -from .._models import BlobType, BlobBlock, BlobProperties -from ._lease_async import BlobLeaseClient -from ._download_async import StorageStreamDownloader - - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings, - ImmutabilityPolicy, - PremiumPageBlobTier, - StandardBlobTier, - SequenceNumberAction - ) - - -class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods - """A client to interact with a specific blob, although that blob may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the blob, - use the :func:`from_blob_url` classmethod. - :param container_name: The container name for the blob. - :type container_name: str - :param blob_name: The name of the blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. 
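(Editor's illustrative sketch: constructing the async client directly from its parts. Endpoint and credential are placeholders; the chunking keywords shown are the optional tuning knobs documented below.)

.. code-block:: python

    from azure.storage.blob.aio import BlobClient

    # Placeholder endpoint and credential; chunking keywords are optional.
    client = BlobClient(
        account_url="https://myaccount.blob.core.windows.net",
        container_name="mycontainer",
        blob_name="myblob.txt",
        credential="<account-key-or-sas-token>",
        max_block_size=4 * 1024 * 1024,        # chunk size for block uploads
        max_single_put_size=64 * 1024 * 1024,  # single-request upload threshold
    )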
- :type blob_name: str - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client] - :end-before: [END create_blob_client] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a URL to a public blob (no auth needed). - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_client_sas_url] - :end-before: [END create_blob_client_sas_url] - :language: python - :dedent: 8 - :caption: Creating the BlobClient from a SAS URL to a blob. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - blob_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobClient, self).__init__( - account_url, - container_name=container_name, - blob_name=blob_name, - snapshot=snapshot, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @distributed_trace_async - async def get_account_information(self, **kwargs): # type: ignore - # type: (Optional[int]) -> Dict[str, str] - """Gets information related to the storage account in which the blob resides. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Dict[str, Any] - """ - Creates a new Block Blob where the content of the blob is read from a given URL. - The content of an existing blob is overwritten with the new blob. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. - :keyword bool include_source_blob_properties: - Indicates if properties from the source blob should be copied. Defaults to True. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - :paramtype tags: dict(str, str) - :keyword bytearray source_content_md5: - Specify the md5 that is used to verify the integrity of the source bytes. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. 
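(Editor's illustrative sketch, not a sample shipped with the package: server-side copy from a source URL into the blob this client points at. Both URLs are placeholders.)

.. code-block:: python

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        dst_url = "https://myaccount.blob.core.windows.net/backups/copy.txt?<sas-token>"
        src_url = "https://otheraccount.blob.core.windows.net/public/original.txt"
        async with BlobClient.from_blob_url(dst_url) as client:
            # The source must be public or carry its own SAS; overwrite replaces existing data.
            await client.upload_blob_from_url(src_url, overwrite=True)

    asyncio.run(main())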
- """ - options = self._upload_blob_from_url_options( - source_url=self._encode_source_url(source_url), - **kwargs) - try: - return await self._client.block_blob.put_blob_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_blob( - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Any - """Creates a new blob from a data source with automatic chunking. - - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - If specified, upload_blob only succeeds if the - blob's lease is active and matches this ID. - Required if the blob has an active lease. - :paramtype: ~azure.storage.blob.aio.BlobLeaseClient - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - Currently this parameter of upload_blob() API is for BlockBlob only. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START upload_a_blob] - :end-before: [END upload_a_blob] - :language: python - :dedent: 16 - :caption: Upload a blob to the container. - """ - options = self._upload_blob_options( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - **kwargs) - if blob_type == BlobType.BlockBlob: - return await upload_block_blob(**options) - if blob_type == BlobType.PageBlob: - return await upload_page_blob(**options) - return await upload_append_blob(**options) - - @distributed_trace_async - async def download_blob(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
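(Editor's illustrative sketch with placeholder names: streaming a blob download into a local file via the returned downloader object.)

.. code-block:: python

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob_url = "https://myaccount.blob.core.windows.net/mycontainer/data.bin?<sas-token>"
        async with BlobClient.from_blob_url(blob_url) as client:
            downloader = await client.download_blob(max_concurrency=4)
            with open("data_copy.bin", "wb") as handle:
                # readinto() streams the content into the open file handle.
                await downloader.readinto(handle)

    asyncio.run(main())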
- :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob] - :end-before: [END download_a_blob] - :language: python - :dedent: 16 - :caption: Download a blob. - """ - options = self._download_blob_options( - offset=offset, - length=length, - **kwargs) - downloader = StorageStreamDownloader(**options) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_blob(self, delete_snapshots=None, **kwargs): - # type: (str, Any) -> None - """Marks the specified blob for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob() - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob - and retains the blob for a specified number of days. - After the specified number of days, the blob's data is removed from the service during garbage collection. - Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` - option. Soft-deleted blob can be restored using :func:`undelete` operation. - - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. If specified, delete_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START delete_blob] - :end-before: [END delete_blob] - :language: python - :dedent: 16 - :caption: Delete a blob. - """ - options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) - try: - await self._client.blob.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_blob(self, **kwargs): - # type: (Any) -> None - """Restores soft-deleted blobs or snapshots. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START undelete_blob] - :end-before: [END undelete_blob] - :language: python - :dedent: 12 - :caption: Undeleting a blob. - """ - try: - await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a blob exists with the defined parameters, and returns - False otherwise. - - :kwarg str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to check if it exists. - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - await self._client.blob.get_properties( - snapshot=self.snapshot, - **kwargs) - return True - # Encrypted with CPK - except ResourceExistsError: - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def get_blob_properties(self, **kwargs): - # type: (Any) -> BlobProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the blob. It does not return the content of the blob. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to get properties. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. 
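(Editor's illustrative sketch with placeholder names: checking for a blob and reading a few of its properties.)

.. code-block:: python

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob_url = "https://myaccount.blob.core.windows.net/mycontainer/data.bin?<sas-token>"
        async with BlobClient.from_blob_url(blob_url) as client:
            if await client.exists():
                props = await client.get_blob_properties()
                print(props.size, props.last_modified, props.blob_type)

    asyncio.run(main())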
- - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: BlobProperties - :rtype: ~azure.storage.blob.BlobProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 12 - :caption: Getting the properties for a blob. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - cpk = kwargs.pop('cpk', None) - cpk_info = None - if cpk: - if self.scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - try: - cls_method = kwargs.pop('cls', None) - if cls_method: - kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) - blob_props = await self._client.blob.get_properties( - timeout=kwargs.pop('timeout', None), - version_id=kwargs.pop('version_id', None), - snapshot=self.snapshot, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=kwargs.pop('cls', None) or deserialize_blob_properties, - cpk_info=cpk_info, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - blob_props.name = self.blob_name - if isinstance(blob_props, BlobProperties): - blob_props.container = self.container_name - blob_props.snapshot = self.snapshot - return blob_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings=None, **kwargs): - # type: (Optional[ContentSettings], Any) -> None - """Sets system properties on the blob. - - If one property is set for the content_settings, all properties will be overridden. 
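(Editor's illustrative sketch with placeholder names: replacing the blob's HTTP headers with a ContentSettings object.)

.. code-block:: python

    import asyncio
    from azure.storage.blob import ContentSettings
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob_url = "https://myaccount.blob.core.windows.net/site/index.html?<sas-token>"
        async with BlobClient.from_blob_url(blob_url) as client:
            # Every ContentSettings field is applied, so restate any header you want to keep.
            await client.set_http_headers(
                ContentSettings(content_type="text/html", cache_control="max-age=3600")
            )

    asyncio.run(main())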
- - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_http_headers_options(content_settings=content_settings, **kwargs) - try: - return await self._client.blob.set_http_headers(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_blob_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Sets user-defined metadata for the blob as one or more name-value pairs. - - :param metadata: - Dict containing name and value pairs. Each call to this operation - replaces all existing metadata attached to the blob. To remove all - metadata from the blob, call this operation with no metadata headers. - :type metadata: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - """ - options = self._set_blob_metadata_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.set_metadata(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_immutability_policy(self, immutability_policy, **kwargs): - # type: (ImmutabilityPolicy, **Any) -> Dict[str, str] - """The Set Immutability Policy operation sets the immutability policy on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - - kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time - kwargs['immutability_policy_mode'] = immutability_policy.policy_mode - return await self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs) - - @distributed_trace_async() - async def delete_immutability_policy(self, **kwargs): - # type: (**Any) -> None - """The Delete Immutability Policy operation deletes the immutability policy on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - - await self._client.blob.delete_immutability_policy(**kwargs) - - @distributed_trace_async - async def set_legal_hold(self, legal_hold, **kwargs): - # type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]] - """The Set Legal Hold operation sets a legal hold on the blob. - - .. versionadded:: 12.10.0 - This operation was introduced in API version '2020-10-02'. 
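(Editor's illustrative sketch with placeholder names: applying a time-boxed immutability policy and a legal hold. It assumes version-level immutability support is enabled for the container or account.)

.. code-block:: python

    import asyncio
    from datetime import datetime, timedelta, timezone
    from azure.storage.blob import ImmutabilityPolicy
    from azure.storage.blob.aio import BlobClient

    async def main():
        blob_url = "https://myaccount.blob.core.windows.net/records/audit.json?<sas-token>"
        async with BlobClient.from_blob_url(blob_url) as client:
            # Assumes version-level immutability is enabled on the container/account.
            policy = ImmutabilityPolicy(
                expiry_time=datetime.now(timezone.utc) + timedelta(days=7),
                policy_mode="Unlocked",
            )
            await client.set_immutability_policy(policy)
            await client.set_legal_hold(True)

    asyncio.run(main())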
- - :param bool legal_hold: - Specified if a legal hold should be set on the blob. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, Union[str, datetime, bool]] - """ - - return await self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs) - - @distributed_trace_async - async def create_page_blob( # type: ignore - self, size, # type: int - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new Page Blob of the specified size. - - :param int size: - This specifies the maximum size for the page blob, up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword int sequence_number: - Only for Page blobs. The sequence number is a user-controlled value that you can use to - track requests. The value of the sequence number must be between 0 - and 2^63 - 1.The default value is 0. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_page_blob_options( - size, - content_settings=content_settings, - metadata=metadata, - premium_page_blob_tier=premium_page_blob_tier, - **kwargs) - try: - return await self._client.page_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a new Append Blob. - - :param ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
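A sketch of ``create_page_blob`` as documented above, followed by a 512-byte-aligned ``upload_page`` write (that operation is documented further below). Names are placeholders and imports assume the standalone async package::

    import asyncio

    from azure.storage.blob.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="mypageblob") as blob:
            # Page blob size and every page write must be aligned to 512-byte boundaries.
            await blob.create_page_blob(size=1024)
            await blob.upload_page(b"x" * 512, offset=0, length=512)

    asyncio.run(main())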
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict[str, Any] - """ - options = self._create_append_blob_options( - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.append_blob.create(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] - """Creates a snapshot of the blob. - - A snapshot is a read-only version of a blob that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a blob as it appears at a moment in time. - - A snapshot of a blob has the same name as the base blob from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
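A sketch of ``create_append_blob`` as documented above, paired with ``append_block`` from the same client (not shown in this excerpt) to add data; names and the connection string are placeholders::

    import asyncio

    from azure.storage.blob.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="mylog") as blob:
            await blob.create_append_blob()
            # Blocks are appended to the end of the blob in the order received.
            await blob.append_block(b"first log line\n")
            await blob.append_block(b"second log line\n")

    asyncio.run(main())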
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START create_blob_snapshot] - :end-before: [END create_blob_snapshot] - :language: python - :dedent: 12 - :caption: Create a snapshot of the blob. - """ - options = self._create_snapshot_options(metadata=metadata, **kwargs) - try: - return await self._client.blob.create_snapshot(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): - # type: (str, Optional[Dict[str, str]], bool, Any) -> Any - """Copies a blob asynchronously. - - This operation returns a copy operation - object that can be used to wait on the completion of the operation, - as well as check status or abort the copy operation. - The Blob service copies blobs on a best-effort basis. - - The source blob for a copy operation may be a block blob, an append blob, - or a page blob. If the destination blob already exists, it must be of the - same blob type as the source blob. Any existing destination blob will be - overwritten. The destination blob cannot be modified while a copy operation - is in progress. - - When copying from a page blob, the Blob service creates a destination page - blob of the source blob's length, initially containing all zeroes. Then - the source page ranges are enumerated, and non-empty ranges are copied. - - For a block blob or an append blob, the Blob service creates a committed - blob of zero length before returning from this operation. When copying - from a block blob, all committed blocks and their block IDs are copied. - Uncommitted blocks are not copied. At the end of the copy operation, the - destination blob will have the same committed block count as the source. - - When copying from an append blob, all committed blocks are copied. 
At the - end of the copy operation, the destination blob will have the same committed - block count as the source. - - For all blob types, you can call status() on the returned polling object - to check the status of the copy operation, or wait() to block until the - operation is complete. The final blob will be committed when the copy completes. - - :param str source_url: - A URL of up to 2 KB in length that specifies a file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.blob.core.windows.net/mycontainer/myblob - - https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= - - https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken - :param metadata: - Name-value pairs associated with the blob as metadata. If no name-value - pairs are specified, the operation will copy the metadata from the - source blob or file to the destination blob. If one or more name-value - pairs are specified, the destination blob is created with the specified - metadata, and metadata is not copied from the source blob or file. - :type metadata: dict(str, str) - :param bool incremental_copy: - Copies the snapshot of the source page blob to a destination page blob. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination. - The copied snapshots are complete copies of the original snapshot and - can be read or copied from as usual. Defaults to False. - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has been modified since the specified date/time. - If the destination blob has not been modified, the Blob service returns - status code 412 (Precondition Failed). - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only - if the destination blob has not been modified since the specified - date/time. If the destination blob has been modified, the Blob service - returns status code 412 (Precondition Failed). - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword destination_lease: - The lease ID specified for this header must match the lease ID of the - destination blob. If the request does not include the lease ID or it is not - valid, the operation fails with status code 412 (Precondition Failed). - :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword source_lease: - Specify this to perform the Copy Blob operation only if - the lease ID given matches the active lease ID of the source blob. - :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword bool seal_destination_blob: - Seal the destination append blob. This operation is only for append blob. - - .. versionadded:: 12.4.0 - - :keyword bool requires_sync: - Enforces that the service will not return a response until the copy is complete. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. 
This option is only available when `incremental_copy` is - set to False and `requires_sync` is set to True. - - .. versionadded:: 12.9.0 - - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the sync copied blob. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.10.0 - - :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). - :rtype: dict[str, Union[str, ~datetime.datetime]] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START copy_blob_from_url] - :end-before: [END copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Copy a blob from a URL. - """ - options = self._start_copy_from_url_options( - source_url=self._encode_source_url(source_url), - metadata=metadata, - incremental_copy=incremental_copy, - **kwargs) - try: - if incremental_copy: - return await self._client.page_blob.copy_incremental(**options) - return await self._client.blob.start_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination blob with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of BlobProperties. - :type copy_id: str or ~azure.storage.blob.BlobProperties - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START abort_copy_blob_from_url] - :end-before: [END abort_copy_blob_from_url] - :language: python - :dedent: 16 - :caption: Abort copying a blob from URL. - """ - options = self._abort_copy_options(copy_id, **kwargs) - try: - await self._client.blob.abort_copy_from_url(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], Any) -> BlobLeaseClient - """Requests a new lease. - - If the blob does not have an active lease, the Blob - Service creates a lease on the blob and returns a new lease. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
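A sketch of ``start_copy_from_url`` and ``abort_copy`` as documented above. The source URL, connection string, and names are placeholders; a real cross-account source would need to be public or carry a SAS token::

    import asyncio

    from azure.storage.blob.aio import BlobClient

    SOURCE_URL = "https://myaccount.blob.core.windows.net/mycontainer/source-blob"  # placeholder

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="copy-target") as dest:
            props = await dest.start_copy_from_url(SOURCE_URL)
            # The returned dict carries the copy_id and copy_status of the asynchronous copy.
            if props["copy_status"] == "pending":
                await dest.abort_copy(props["copy_id"])

    asyncio.run(main())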
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a blob. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): - # type: (Union[str, StandardBlobTier], Any) -> None - """This operation sets the tier on a block blob. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - :param standard_blob_tier: - Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_modify_conditions(kwargs) - if standard_blob_tier is None: - raise ValueError("A StandardBlobTier must be specified") - try: - await self._client.blob.set_tier( - tier=standard_blob_tier, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block( - self, block_id, # type: str - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> None - """Creates a new block to be committed as part of a blob. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param data: The blob data. - :param int length: Size of the block. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - options = self._stage_block_options( - block_id, - data, - length=length, - **kwargs) - try: - return await self._client.block_blob.stage_block(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def stage_block_from_url( - self, block_id, # type: Union[str, int] - source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - source_content_md5=None, # type: Optional[Union[bytes, bytearray]] - **kwargs - ): - # type: (...) -> None - """Creates a new block to be committed as part of a blob where - the contents are read from a URL. - - :param str block_id: A string value that identifies the block. - The string should be less than or equal to 64 bytes in size. - For a given blob, the block_id must be the same size for each block. - :param str source_url: The URL. - :param int source_offset: - Start of byte range to use for the block. - Must be set if source length is provided. - :param int source_length: The size of the block in bytes. - :param bytearray source_content_md5: - Specify the md5 calculated for the range of - bytes that must be read from the copy source. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
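A sketch combining ``acquire_lease`` and ``set_standard_blob_tier`` as documented above: the lease is taken, passed to the tiering call, then released. Names and the connection string are placeholders::

    import asyncio

    from azure.storage.blob.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="myblob") as blob:
            # 15 seconds is the shortest non-infinite lease duration.
            lease = await blob.acquire_lease(lease_duration=15)
            await blob.set_standard_blob_tier("Cool", lease=lease)
            await lease.release()

    asyncio.run(main())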
- Use of customer-provided keys must be done over HTTPS.
- As the encryption key itself is provided in the request,
- a secure connection must be established to transfer the key.
- :keyword str encryption_scope:
- A predefined encryption scope used to encrypt the data on the service. An encryption
- scope can be created using the Management API and referenced here by name. If a default
- encryption scope has been defined at the container, this value will override it if the
- container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
- .. versionadded:: 12.2.0
-
- :keyword int timeout:
- The timeout parameter is expressed in seconds.
- :keyword str source_authorization:
- Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
- the prefix of the source_authorization string.
- :rtype: None
- """
- options = self._stage_block_from_url_options(
- block_id,
- source_url=self._encode_source_url(source_url),
- source_offset=source_offset,
- source_length=source_length,
- source_content_md5=source_content_md5,
- **kwargs)
- try:
- return await self._client.block_blob.stage_block_from_url(**options)
- except HttpResponseError as error:
- process_storage_error(error)
-
- @distributed_trace_async
- async def get_block_list(self, block_list_type="committed", **kwargs):
- # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
- """The Get Block List operation retrieves the list of blocks that have
- been uploaded as part of a block blob.
-
- :param str block_list_type:
- Specifies whether to return the list of committed
- blocks, the list of uncommitted blocks, or both lists together.
- Possible values include: 'committed', 'uncommitted', 'all'
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
- :keyword str if_tags_match_condition:
- Specify a SQL where clause on blob tags to operate only on blob with a matching value.
- eg. ``\"\\\"tagname\\\"='my tag'\"``
-
- .. versionadded:: 12.4.0
-
- :keyword int timeout:
- The timeout parameter is expressed in seconds.
- :returns: A tuple of two lists - committed and uncommitted blocks
- :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
- """
- access_conditions = get_access_conditions(kwargs.pop('lease', None))
- mod_conditions = get_modify_conditions(kwargs)
- try:
- blocks = await self._client.block_blob.get_block_list(
- list_type=block_list_type,
- snapshot=self.snapshot,
- timeout=kwargs.pop('timeout', None),
- lease_access_conditions=access_conditions,
- modified_access_conditions=mod_conditions,
- **kwargs)
- except HttpResponseError as error:
- process_storage_error(error)
- return self._get_block_list_result(blocks)
-
- @distributed_trace_async
- async def commit_block_list( # type: ignore
- self, block_list, # type: List[BlobBlock]
- content_settings=None, # type: Optional[ContentSettings]
- metadata=None, # type: Optional[Dict[str, str]]
- **kwargs
- ):
- # type: (...) -> Dict[str, Union[str, datetime]]
- """The Commit Block List operation writes a blob by specifying the list of
- block IDs that make up the blob.
-
- :param list block_list:
- List of BlobBlock objects.
- :param ~azure.storage.blob.ContentSettings content_settings:
- ContentSettings object used to set blob properties. Used to set content type, encoding,
- language, disposition, md5, and cache control.
- :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict[str, str] - :keyword tags: - Name-value pairs associated with the blob as tag. Tags are case-sensitive. - The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, - and tag values must be between 0 and 256 characters. - Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), - space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) - - .. versionadded:: 12.4.0 - - :paramtype tags: dict(str, str) - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: - Specifies the immutability policy of a blob, blob snapshot or blob version. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool legal_hold: - Specified if a legal hold should be set on the blob. - - .. versionadded:: 12.10.0 - This was introduced in API version '2020-10-02'. - - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. 
An encryption
- scope can be created using the Management API and referenced here by name. If a default
- encryption scope has been defined at the container, this value will override it if the
- container-level scope is configured to allow overrides. Otherwise an error will be raised.
-
- .. versionadded:: 12.2.0
-
- :keyword int timeout:
- The timeout parameter is expressed in seconds.
- :returns: Blob-updated property dict (Etag and last modified).
- :rtype: dict(str, Any)
- """
- options = self._commit_block_list_options(
- block_list,
- content_settings=content_settings,
- metadata=metadata,
- **kwargs)
- try:
- return await self._client.block_blob.commit_block_list(**options) # type: ignore
- except HttpResponseError as error:
- process_storage_error(error)
-
- @distributed_trace_async
- async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs):
- # type: (Union[str, PremiumPageBlobTier], **Any) -> None
- """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
-
- :param premium_page_blob_tier:
- A page blob tier value to set the blob to. The tier correlates to the size of the
- blob and number of allowed IOPS. This is only applicable to page blobs on
- premium storage accounts.
- :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
- :keyword str if_tags_match_condition:
- Specify a SQL where clause on blob tags to operate only on blob with a matching value.
- eg. ``\"\\\"tagname\\\"='my tag'\"``
-
- .. versionadded:: 12.4.0
-
- :keyword int timeout:
- The timeout parameter is expressed in seconds. This method may make
- multiple calls to the Azure service and the timeout will apply to
- each call individually.
- :keyword lease:
- Required if the blob has an active lease. Value can be a BlobLeaseClient object
- or the lease ID as a string.
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
- :rtype: None
- """
- access_conditions = get_access_conditions(kwargs.pop('lease', None))
- mod_conditions = get_modify_conditions(kwargs)
- if premium_page_blob_tier is None:
- raise ValueError("A PremiumPageBlobTier must be specified")
- try:
- await self._client.blob.set_tier(
- tier=premium_page_blob_tier,
- timeout=kwargs.pop('timeout', None),
- lease_access_conditions=access_conditions,
- modified_access_conditions=mod_conditions,
- **kwargs)
- except HttpResponseError as error:
- process_storage_error(error)
-
- @distributed_trace_async
- async def set_blob_tags(self, tags=None, **kwargs):
- # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
- """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
- Each call to this operation replaces all existing tags attached to the blob. To remove all
- tags from the blob, call this operation with no tags set.
-
- .. versionadded:: 12.4.0
- This operation was introduced in API version '2019-12-12'.
-
- :param tags:
- Name-value pairs associated with the blob as tag. Tags are case-sensitive.
- The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
- and tag values must be between 0 and 256 characters.
- Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
- space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
- :type tags: dict(str, str)
- :keyword str version_id:
- The version id parameter is an opaque DateTime
- value that, when present, specifies the version of the blob to add tags to.
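A sketch of the block-blob workflow documented above: stage blocks, commit the block list, then read it back with ``get_block_list``. Names, block IDs, and the connection string are placeholders::

    import asyncio

    from azure.storage.blob import BlobBlock
    from azure.storage.blob.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="blockblob") as blob:
            block_ids = ["block-000", "block-001"]
            await blob.stage_block(block_ids[0], b"hello ")
            await blob.stage_block(block_ids[1], b"world")
            # Committing the list is what makes the staged blocks part of the blob.
            await blob.commit_block_list([BlobBlock(block_id=bid) for bid in block_ids])
            committed, _ = await blob.get_block_list("committed")
            print([block.id for block in committed])

    asyncio.run(main())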
- :keyword bool validate_content: - If true, calculates an MD5 hash of the tags content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - options = self._set_blob_tags_options(tags=tags, **kwargs) - try: - return await self._client.blob.set_tags(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_blob_tags(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to add tags to. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Key value pairs of blob tags. - :rtype: Dict[str, str] - """ - options = self._get_blob_tags_options(**kwargs) - try: - _, tags = await self._client.blob.get_tags(**options) - return parse_tags(tags) # pylint: disable=protected-access - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_page_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a Page Blob or snapshot - of a page blob. - - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. - If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. 
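A sketch of ``set_blob_tags`` and ``get_blob_tags`` as documented above (available from service version 2019-12-12); names and the connection string are placeholders::

    import asyncio

    from azure.storage.blob.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="myblob") as blob:
            # Each call replaces the full tag set on the blob.
            await blob.set_blob_tags({"project": "demo", "env": "test"})
            tags = await blob.get_blob_tags()
            print(tags)

    asyncio.run(main())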
- :param str previous_snapshot_diff: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous blob snapshot to be compared - against a more recent snapshot or the current blob. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - previous_snapshot_diff=previous_snapshot_diff, - **kwargs) - try: - if previous_snapshot_diff: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - else: - ranges = await self._client.page_blob.get_page_ranges(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def get_page_range_diff_for_managed_disk( - self, previous_snapshot_url, # type: str - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a managed disk or snapshot. - - .. note:: - This operation is only available for managed disk accounts. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-07-07'. - - :param previous_snapshot_url: - Specifies the URL of a previous snapshot of the managed disk. - The response will only contain pages that were changed between the target blob and - its previous snapshot. - :param int offset: - Start of byte range to use for getting valid page ranges. - If no length is given, all bytes after the offset will be searched. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for getting valid page ranges. 
- If length is given, offset must be provided. - This range will return valid page ranges from the offset start up to - the specified length. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. - The first element are filled page ranges, the 2nd element is cleared page ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_page_ranges_options( - offset=offset, - length=length, - prev_snapshot_url=previous_snapshot_url, - **kwargs) - try: - ranges = await self._client.page_blob.get_page_ranges_diff(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_page_ranges_result(ranges) - - @distributed_trace_async - async def set_sequence_number( # type: ignore - self, sequence_number_action, # type: Union[str, SequenceNumberAction] - sequence_number=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the blob sequence number. - - :param str sequence_number_action: - This property indicates how the service should modify the blob's sequence - number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. - :param str sequence_number: - This property sets the blob's sequence number. The sequence number is a - user-controlled property that you can use to track requests and manage - concurrency issues. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
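A sketch of ``get_page_ranges`` as documented above; it returns filled and cleared ranges as two lists of ``{'start': ..., 'end': ...}`` dictionaries. Names and the connection string are placeholders::

    import asyncio

    from azure.storage.blob.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="mypageblob") as blob:
            filled, cleared = await blob.get_page_ranges()
            for page_range in filled:
                print("filled:", page_range["start"], "-", page_range["end"])

    asyncio.run(main())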
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._set_sequence_number_options( - sequence_number_action, sequence_number=sequence_number, **kwargs) - try: - return await self._client.page_blob.update_sequence_number(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_blob(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Union[str, datetime]] - """Resizes a page blob to the specified size. - - If the specified value is less than the current size of the blob, - then all pages above the specified value are cleared. - - :param int size: - Size used to resize blob. Maximum size for a page blob is up to 1 TB. - The page blob size must be aligned to a 512-byte boundary. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._resize_blob_options(size, **kwargs) - try: - return await self._client.page_blob.resize(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_page( # type: ignore - self, page, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """The Upload Pages operation writes a range of pages to a page blob. - - :param bytes page: - Content of the page. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. 
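A sketch of ``resize_blob`` and ``set_sequence_number`` as documented above; the ``"update"`` action, the sequence number, and the 2048-byte size are illustrative values, and names are placeholders::

    import asyncio

    from azure.storage.blob.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="mypageblob") as blob:
            # New size must stay 512-byte aligned; shrinking clears pages above it.
            await blob.resize_blob(2048)
            await blob.set_sequence_number("update", "7")

    asyncio.run(main())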
- Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._upload_page_options( - page=page, - offset=offset, - length=length, - **kwargs) - try: - return await self._client.page_blob.upload_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_pages_from_url(self, source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """ - The Upload Pages operation writes a range of pages to a page blob where - the contents are read from a URL. - - :param str source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int offset: - Start of byte range to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword bytes source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. 
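As a rough illustration of how the page-blob operations above compose, here is a minimal sketch. It assumes the upstream-equivalent ``azure.storage.blob.aio`` import path (the vendored copies live under ``azure.multiapi.storagev2.blob.<api_version>.aio``); the account URL, SAS token, container and blob names are placeholders::

    import asyncio
    from azure.storage.blob.aio import BlobClient  # assumed upstream-equivalent import

    async def write_pages() -> None:
        # Placeholder endpoint/credential; the container is assumed to already exist.
        blob = BlobClient("https://<account>.blob.core.windows.net",
                          container_name="pages", blob_name="disk.vhd",
                          credential="<sas-token>")
        async with blob:
            # Page blobs have a fixed size; offsets and lengths are 512-byte aligned.
            await blob.create_page_blob(size=1024)
            await blob.upload_page(b"\x01" * 512, offset=0, length=512)
            await blob.upload_page(b"\x02" * 512, offset=512, length=512)
            # Shrinking the blob clears any pages beyond the new size.
            await blob.resize_blob(512)

    asyncio.run(write_pages())
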
- :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - - options = self._upload_pages_from_url_options( - source_url=self._encode_source_url(source_url), - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def clear_page(self, offset, length, **kwargs): - # type: (int, int, Any) -> Dict[str, Union[str, datetime]] - """Clears a range of pages. - - :param int offset: - Start of byte range to use for writing to a section of the blob. 
- Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :param int length: - Number of bytes to use for writing to a section of the blob. - Pages must be aligned with 512-byte boundaries, the start offset - must be a modulus of 512 and the length must be a modulus of - 512. - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int if_sequence_number_lte: - If the blob's sequence number is less than or equal to - the specified value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_lt: - If the blob's sequence number is less than the specified - value, the request proceeds; otherwise it fails. - :keyword int if_sequence_number_eq: - If the blob's sequence number is equal to the specified - value, the request proceeds; otherwise it fails. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - options = self._clear_page_options(offset, length, **kwargs) - try: - return await self._client.page_blob.clear_pages(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def append_block( # type: ignore - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Commits a new block of data to the end of the existing append blob. - - :param data: - Content of the block. - :param int length: - Size of the block in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. 
The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https, as https (the default), - will already validate. Note that this MD5 hash is not stored with the - blob. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword str encoding: - Defaults to UTF-8. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
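A minimal sketch of the append-blob flow described above (``append_block`` followed by ``seal_append_blob``, the latter added in SDK 12.4.0), again assuming the upstream-equivalent ``azure.storage.blob.aio`` import path and placeholder account, container and credential values::

    import asyncio
    from azure.storage.blob.aio import BlobClient  # assumed upstream-equivalent import

    async def append_log() -> None:
        blob = BlobClient("https://<account>.blob.core.windows.net",   # placeholder
                          container_name="logs", blob_name="app.log",
                          credential="<sas-token>")                    # placeholder
        async with blob:
            await blob.create_append_blob()
            # Each call commits a new block at the current end of the blob.
            await blob.append_block(b"first line\n")
            await blob.append_block(b"second line\n", maxsize_condition=1024 * 1024)
            # Sealing makes the append blob read-only.
            await blob.seal_append_blob()

    asyncio.run(append_log())
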
- :rtype: dict(str, Any) - """ - options = self._append_block_options( - data, - length=length, - **kwargs - ) - try: - return await self._client.append_blob.append_block(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async() - async def append_block_from_url(self, copy_source_url, # type: str - source_offset=None, # type: Optional[int] - source_length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """ - Creates a new block to be committed as part of a blob, where the contents are read from a source url. - - :param str copy_source_url: - The URL of the source data. It can point to any Azure Blob or File, that is either public or has a - shared access signature attached. - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - :param int source_length: - This indicates the end of the range of bytes that has to be taken from the copy source. - :keyword bytearray source_content_md5: - If given, the service will calculate the MD5 hash of the block content and compare against this value. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the - AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The destination match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the source resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the source resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._append_block_from_url_options( - copy_source_url=self._encode_source_url(copy_source_url), - source_offset=source_offset, - source_length=source_length, - **kwargs - ) - try: - return await self._client.append_blob.append_block_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async() - async def seal_append_blob(self, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """The Seal operation seals the Append Blob to make it read-only. - - .. versionadded:: 12.4.0 - - :keyword int appendpos_condition: - Optional conditional header, used only for the Append Block operation. - A number indicating the byte offset to compare. Append Block will - succeed only if the append position is equal to this number. If it - is not, the request will fail with the AppendPositionConditionNotMet error - (HTTP status code 412 - Precondition Failed). - :keyword lease: - Required if the blob has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). - :rtype: dict(str, Any) - """ - options = self._seal_append_blob_options(**kwargs) - try: - return await self._client.append_blob.seal(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _get_container_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> ContainerClient - """Get a client to interact with the blob's parent container. - - The container need not already exist. Defaults to current blob's credentials. - - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_client_from_blob_client] - :end-before: [END get_container_client_from_blob_client] - :language: python - :dedent: 12 - :caption: Get container client from blob object. - """ - from ._container_client_async import ContainerClient - if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - _pipeline=_pipeline, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/_blob_service_client_async.py deleted file mode 100644 index 10ec3cb..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_blob_service_client_async.py +++ /dev/null @@ -1,682 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -import warnings -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged - -from .._shared.models import LocationMode -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._shared.parser import _to_utc_datetime -from .._shared.response_handlers import parse_to_internal_user_delegation_key -from .._generated.aio import AzureBlobStorage -from .._generated.models import StorageServiceProperties, KeyInfo -from .._blob_service_client import BlobServiceClient as BlobServiceClientBase -from ._container_client_async import ContainerClient -from ._blob_client_async import BlobClient -from .._models import ContainerProperties -from .._deserialize import service_stats_deserialize, service_properties_deserialize -from .._serialize import get_api_version -from ._models import ContainerPropertiesPaged, FilteredBlobPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey - from ._lease_async import BlobLeaseClient - from .._models import ( - BlobProperties, - PublicAccess, - BlobAnalyticsLogging, - Metrics, - CorsRule, - RetentionPolicy, - StaticWebsite, - ) - - -class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): - """A client to interact with the Blob Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete containers within the account. - For operations relating to a specific container or blob, clients for those entities - can also be retrieved using the `get_client` functions. - - :param str account_url: - The URL to the blob storage account. Any other entities included - in the URL path (e.g. container or blob) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. 
- :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client] - :end-before: [END create_blob_service_client] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with account url and credential. - - .. literalinclude:: ../samples/blob_samples_authentication_async.py - :start-after: [START create_blob_service_client_oauth] - :end-before: [END create_blob_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the BlobServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(BlobServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @distributed_trace_async - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.blob.UserDelegationKey - """ - key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) - timeout = kwargs.pop('timeout', None) - try: - user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, - timeout=timeout, - **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. 
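For the account-level read operations documented here, a minimal usage sketch; the import path, account URL and key are placeholders, and ``get_service_stats`` additionally requires read-access geo-redundant replication to be enabled::

    import asyncio
    from azure.storage.blob.aio import BlobServiceClient  # assumed upstream-equivalent import

    async def show_account() -> None:
        service = BlobServiceClient("https://<account>.blob.core.windows.net",
                                    credential="<account-key>")  # placeholder credential
        async with service:
            info = await service.get_account_information()
            # Returned dict includes 'sku_name' and 'account_kind'.
            print(info["sku_name"], info["account_kind"])
            stats = await service.get_service_stats()
            print(stats)

    asyncio.run(show_account())
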
- The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_account_info] - :end-before: [END get_blob_service_account_info] - :language: python - :dedent: 12 - :caption: Getting account information for the blob service. - """ - try: - return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_stats(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Retrieves statistics related to replication for the Blob service. - - It is only available when read-access geo-redundant replication is enabled for - the storage account. - - With geo-redundant replication, Azure Storage maintains your data durable - in two locations. In both locations, Azure Storage constantly maintains - multiple healthy replicas of your data. The location where you read, - create, update, or delete data is the primary storage account location. - The primary location exists in the region you choose at the time you - create an account via the Azure Management Azure classic portal, for - example, North Central US. The location to which your data is replicated - is the secondary location. The secondary location is automatically - determined based on the location of the primary; it is in a second data - center that resides in the same region as the primary location. Read-only - access is available from the secondary location, if read-access geo-redundant - replication is enabled for your storage account. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The blob service stats. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_stats] - :end-before: [END get_blob_service_stats] - :language: python - :dedent: 12 - :caption: Getting service stats for the blob service. - """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.service.get_statistics( # type: ignore - timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) - return service_stats_deserialize(stats) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing blob service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_properties] - :end-before: [END get_blob_service_properties] - :language: python - :dedent: 12 - :caption: Getting service properties for the blob service. 
- """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] - hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - target_version=None, # type: Optional[str] - delete_retention_policy=None, # type: Optional[RetentionPolicy] - static_website=None, # type: Optional[StaticWebsite] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's Blob service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :param analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for blobs. - :type hour_metrics: ~azure.storage.blob.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for blobs. - :type minute_metrics: ~azure.storage.blob.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.blob.CorsRule] - :param str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :param delete_retention_policy: - The delete retention policy specifies whether to retain deleted blobs. - It also specifies the number of days and versions of blob to keep. - :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy - :param static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.blob.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START set_blob_service_properties] - :end-before: [END set_blob_service_properties] - :language: python - :dedent: 12 - :caption: Setting service properties for the blob service. 
- """ - if all(parameter is None for parameter in [ - analytics_logging, hour_metrics, minute_metrics, cors, - target_version, delete_retention_policy, static_website]): - raise ValueError("set_service_properties should be called with at least one parameter") - - props = StorageServiceProperties( - logging=analytics_logging, - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - default_service_version=target_version, - delete_retention_policy=delete_retention_policy, - static_website=static_website - ) - timeout = kwargs.pop('timeout', None) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_containers( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> AsyncItemPaged[ContainerProperties] - """Returns a generator to list the containers under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all containers have been returned. - - :param str name_starts_with: - Filters the results to return only containers whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that container metadata to be returned in the response. - The default value is `False`. - :keyword bool include_deleted: - Specifies that deleted containers to be returned in the response. This is for container restore enabled - account. The default value is `False`. - .. versionadded:: 12.4.0 - :keyword bool include_system: - Flag specifying that system containers should be included. - .. versionadded:: 12.10.0 - :keyword int results_per_page: - The maximum number of container names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ContainerProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_list_containers] - :end-before: [END bsc_list_containers] - :language: python - :dedent: 16 - :caption: Listing the containers in the blob service. - """ - include = ['metadata'] if include_metadata else [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - include_system = kwargs.pop('include_system', None) - if include_system: - include.append("system") - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_containers_segment, - prefix=name_starts_with, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=ContainerPropertiesPaged - ) - - @distributed_trace - def find_blobs_by_tags(self, filter_expression, **kwargs): - # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] - """The Filter Blobs operation enables callers to list blobs across all - containers whose tags match a given search expression. Filter blobs - searches across all containers within a storage account but can be - scoped within the expression to a single container. 
- - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - To specify a container, eg. "@container='containerName' and \"Name\"='C'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] - """ - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.service.filter_blobs, - where=filter_expression, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace_async - async def create_container( - self, name, # type: str - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[Union[PublicAccess, str]] - **kwargs - ): - # type: (...) -> ContainerClient - """Creates a new container under the specified account. - - If the container with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created container. - - :param str name: The name of the container to create. - :param metadata: - A dict with name-value pairs to associate with the - container as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: 'container', 'blob'. - :type public_access: str or ~azure.storage.blob.PublicAccess - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_create_container] - :end-before: [END bsc_create_container] - :language: python - :dedent: 16 - :caption: Creating a container in the blob service. - """ - container = self.get_container_client(name) - timeout = kwargs.pop('timeout', None) - kwargs.setdefault('merge_span', True) - await container.create_container( - metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) - return container - - @distributed_trace_async - async def delete_container( - self, container, # type: Union[ContainerProperties, str] - lease=None, # type: Optional[Union[BlobLeaseClient, str]] - **kwargs - ): - # type: (...) -> None - """Marks the specified container for deletion. - - The container and any blobs contained within it are later deleted during garbage collection. - If the container is not found, a ResourceNotFoundError will be raised. - - :param container: - The container to delete. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. 
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_delete_container] - :end-before: [END bsc_delete_container] - :language: python - :dedent: 16 - :caption: Deleting a container in the blob service. - """ - container = self.get_container_client(container) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await container.delete_container( # type: ignore - lease=lease, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def _rename_container(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str name: - The name of the container to rename. - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.blob.ContainerClient - """ - renamed_container = self.get_container_client(new_name) - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - await renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): - # type: (str, str, **Any) -> ContainerClient - """Restores soft-deleted container. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_container_name: - Specifies the name of the deleted container to restore. - :param str deleted_container_version: - Specifies the version of the deleted container to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.aio.ContainerClient - """ - new_name = kwargs.pop('new_name', None) - if new_name: - warnings.warn("`new_name` is no longer supported.", DeprecationWarning) - container = self.get_container_client(new_name or deleted_container_name) - try: - await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access - deleted_container_version=deleted_container_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return container - except HttpResponseError as error: - process_storage_error(error) - - def get_container_client(self, container): - # type: (Union[ContainerProperties, str]) -> ContainerClient - """Get a client to interact with the specified container. - - The container need not already exist. - - :param container: - The container. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :returns: A ContainerClient. - :rtype: ~azure.storage.blob.aio.ContainerClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_container_client] - :end-before: [END bsc_get_container_client] - :language: python - :dedent: 12 - :caption: Getting the container client to interact with a specific container. - """ - try: - container_name = container.name - except AttributeError: - container_name = container - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ContainerClient( - self.url, container_name=container_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_blob_client( - self, container, # type: Union[ContainerProperties, str] - blob, # type: Union[BlobProperties, str] - snapshot=None # type: Optional[Union[Dict[str, Any], str]] - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param container: - The container that the blob is in. This can either be the name of the container, - or an instance of ContainerProperties. - :type container: str or ~azure.storage.blob.ContainerProperties - :param blob: - The blob with which to interact. This can either be the name of the blob, - or an instance of BlobProperties. - :type blob: str or ~azure.storage.blob.BlobProperties - :param snapshot: - The optional blob snapshot on which to operate. This can either be the ID of the snapshot, - or a dictionary output returned by - :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. - :type snapshot: str or dict(str, Any) - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START bsc_get_blob_client] - :end-before: [END bsc_get_blob_client] - :language: python - :dedent: 16 - :caption: Getting the blob client to interact with a specific blob. 
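Putting the container and blob client getters above together, a minimal end-to-end sketch; the container and blob names are arbitrary placeholders, and ``upload_blob`` is the standard blob upload call from the same client, not shown in this excerpt::

    import asyncio
    from azure.storage.blob.aio import BlobServiceClient  # assumed upstream-equivalent import

    async def round_trip() -> None:
        service = BlobServiceClient("https://<account>.blob.core.windows.net",
                                    credential="<account-key>")  # placeholder credential
        async with service:
            # create_container returns a ContainerClient for the newly created container.
            container = await service.create_container("samples-demo")
            blob = service.get_blob_client("samples-demo", "hello.txt")
            await blob.upload_blob(b"hello world")
            # delete_container accepts the container name or a ContainerProperties instance.
            await service.delete_container(container.container_name)

    asyncio.run(round_trip())
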
- """ - try: - container_name = container.name - except AttributeError: - container_name = container - - try: - blob_name = blob.name - except AttributeError: - blob_name = blob - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( # type: ignore - self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/_container_client_async.py deleted file mode 100644 index 43e5608..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_container_client_async.py +++ /dev/null @@ -1,1255 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.pipeline.transport import AsyncHttpResponse - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers, - return_headers_and_deserialized) -from .._generated.aio import AzureBlobStorage -from .._generated.models import SignedIdentifier -from .._deserialize import deserialize_container_properties -from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions -from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name -from .._models import ContainerProperties, BlobType, BlobProperties, FilteredBlob # pylint: disable=unused-import -from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix -from ._lease_async import BlobLeaseClient -from ._blob_client_async import BlobClient -from ._models import FilteredBlobPaged - -if TYPE_CHECKING: - from .._models import PublicAccess - from ._download_async import StorageStreamDownloader - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - AccessPolicy, - StandardBlobTier, - PremiumPageBlobTier) - - -class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): - """A client to interact with a specific container, although that container - may not yet exist. 
- - For operations relating to a specific blob within this container, a blob client can be - retrieved using the :func:`~get_blob_client` function. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the container, - use the :func:`from_container_url` classmethod. - :param container_name: - The name of the container for the blob. - :type container_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.2.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. - Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be - uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, - the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. - :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient - algorithm when uploading a block blob. Defaults to 4*1024*1024+1. - :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. - :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, - the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. - :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, - or 4MB. - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_from_service] - :end-before: [END create_container_client_from_service] - :language: python - :dedent: 8 - :caption: Get a ContainerClient from an existing BlobServiceClient. - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container_client_sasurl] - :end-before: [END create_container_client_sasurl] - :language: python - :dedent: 12 - :caption: Creating the container client directly. - """ - def __init__( - self, account_url, # type: str - container_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(ContainerClient, self).__init__( - account_url, - container_name=container_name, - credential=credential, - **kwargs) - self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @distributed_trace_async - async def create_container(self, metadata=None, public_access=None, **kwargs): - # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None - """ - Creates a new container under the specified account. If the container - with the same name already exists, the operation fails. - - :param metadata: - A dict with name_value pairs to associate with the - container as metadata. Example:{'Category':'test'} - :type metadata: dict[str, str] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword container_encryption_scope: - Specifies the default encryption scope to set on the container and use for - all future writes. - - .. versionadded:: 12.2.0 - - :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START create_container] - :end-before: [END create_container] - :language: python - :dedent: 16 - :caption: Creating a container to store blobs. - """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - timeout = kwargs.pop('timeout', None) - container_cpk_scope_info = get_container_cpk_scope_info(kwargs) - try: - return await self._client.container.create( # type: ignore - timeout=timeout, - access=public_access, - container_cpk_scope_info=container_cpk_scope_info, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def _rename_container(self, new_name, **kwargs): - # type: (str, **Any) -> ContainerClient - """Renames a container. - - Operation is successful only if the source container exists. - - :param str new_name: - The new container name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source container. - :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.blob.ContainerClient - """ - lease = kwargs.pop('lease', None) - try: - kwargs['source_lease_id'] = lease.id # type: str - except AttributeError: - kwargs['source_lease_id'] = lease - try: - renamed_container = ContainerClient( - "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access - return renamed_container - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_container( - self, **kwargs): - # type: (Any) -> None - """ - Marks the specified container for deletion. The container and any blobs - contained within it are later deleted during garbage collection. - - :keyword lease: - If specified, delete_container only succeeds if the - container's lease is active and matches this ID. - Required if the container has an active lease. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START delete_container] - :end-before: [END delete_container] - :language: python - :dedent: 16 - :caption: Delete a container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - await self._client.container.delete( - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> BlobLeaseClient - """ - Requests a new lease. If the container does not have an active lease, - the Blob service creates a lease on the container and returns a new - lease ID. 
- - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A BlobLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.blob.aio.BlobLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START acquire_lease_on_container] - :end-before: [END acquire_lease_on_container] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the container. - """ - lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) - return lease - - @distributed_trace_async - async def get_account_information(self, **kwargs): - # type: (**Any) -> Dict[str, str] - """Gets information related to the storage account. - - The information can also be retrieved if the user has a SAS to a container or blob. - The keys in the returned dictionary include 'sku_name' and 'account_kind'. - - :returns: A dict of account information (SKU and account type). - :rtype: dict(str, str) - """ - try: - return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_container_properties(self, **kwargs): - # type: (**Any) -> ContainerProperties - """Returns all user-defined metadata and system properties for the specified - container. The data returned does not include the container's list of blobs. - - :keyword lease: - If specified, get_container_properties only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified container within a container object. 
- :rtype: ~azure.storage.blob.ContainerProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_properties] - :end-before: [END get_container_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the container. - """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.container.get_properties( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=deserialize_container_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - response.name = self.container_name - return response # type: ignore - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a container exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - try: - await self._client.container.get_properties(**kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def set_container_metadata( # type: ignore - self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - container. Each call to this operation replaces all existing metadata - attached to the container. To remove all metadata from the container, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the container as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_container_metadata only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_metadata] - :end-before: [END set_container_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. 
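# A minimal sketch (placeholder names, assuming an existing async ContainerClient)
# showing create_container followed by set_container_metadata; note that every
# set_container_metadata call replaces all metadata previously stored on the container.
async def create_and_relabel(container):
    await container.create_container(metadata={'category': 'test'})
    await container.set_container_metadata(metadata={'category': 'archive'})
    props = await container.get_container_properties()
    return props.metadata  # {'category': 'archive'}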
- """ - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - mod_conditions = get_modify_conditions(kwargs) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.container.set_metadata( # type: ignore - timeout=timeout, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs - # type: (...) -> BlobServiceClient - """Get a client to interact with the container's parent service account. - - Defaults to current container's credentials. - - :returns: A BlobServiceClient. - :rtype: ~azure.storage.blob.BlobServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_service_async.py - :start-after: [START get_blob_service_client_from_container_client] - :end-before: [END get_blob_service_client_from_container_client] - :language: python - :dedent: 8 - :caption: Get blob service client from container object. - """ - from ._blob_service_client_async import BlobServiceClient - if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - else: - _pipeline = self._pipeline # pylint: disable = protected-access - return BlobServiceClient( - "{}://{}".format(self.scheme, self.primary_hostname), - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, - _pipeline=_pipeline) - - - @distributed_trace_async - async def get_container_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified container. - The permissions indicate whether container data may be accessed publicly. - - :keyword lease: - If specified, get_container_access_policy only succeeds if the - container's lease is active and matches this ID. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_container_access_policy] - :end-before: [END get_container_access_policy] - :language: python - :dedent: 16 - :caption: Getting the access policy on the container. 
- """ - lease = kwargs.pop('lease', None) - access_conditions = get_access_conditions(lease) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.container.get_access_policy( - timeout=timeout, - lease_access_conditions=access_conditions, - cls=return_headers_and_deserialized, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('blob_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_container_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs # type: Any - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified container or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether blobs in a container may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the container. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] - :param ~azure.storage.blob.PublicAccess public_access: - Possible values include: 'container', 'blob'. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Container-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START set_container_access_policy] - :end-before: [END set_container_access_policy] - :language: python - :dedent: 16 - :caption: Setting access policy on the container. - """ - timeout = kwargs.pop('timeout', None) - lease = kwargs.pop('lease', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore - signed_identifiers = identifiers # type: ignore - - mod_conditions = get_modify_conditions(kwargs) - access_conditions = get_access_conditions(lease) - try: - return await self._client.container.set_access_policy( - container_acl=signed_identifiers or None, - timeout=timeout, - access=public_access, - lease_access_conditions=access_conditions, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_blobs(self, name_starts_with=None, include=None, **kwargs): - # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] or str include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', - 'tags', 'versions'. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START list_blobs_in_container] - :end-before: [END list_blobs_in_container] - :language: python - :dedent: 12 - :caption: List the blobs in the container. - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_flat_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - page_iterator_class=BlobPropertiesPaged - ) - - @distributed_trace - def walk_blobs( - self, name_starts_with=None, # type: Optional[str] - include=None, # type: Optional[Any] - delimiter="/", # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> AsyncItemPaged[BlobProperties] - """Returns a generator to list the blobs under the specified container. - The generator will lazily follow the continuation tokens returned by - the service. This operation will list blobs in accordance with a hierarchy, - as delimited by the specified delimiter character. - - :param str name_starts_with: - Filters the results to return only blobs whose names - begin with the specified prefix. - :param list[str] include: - Specifies one or more additional datasets to include in the response. - Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
- :param str delimiter: - When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose - names begin with the same substring up to the appearance of the delimiter - character. The delimiter may be a single character or a string. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of BlobProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] - """ - if include and not isinstance(include, list): - include = [include] - - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.list_blob_hierarchy_segment, - delimiter=delimiter, - include=include, - timeout=timeout, - **kwargs) - return BlobPrefix( - command, - prefix=name_starts_with, - results_per_page=results_per_page, - delimiter=delimiter) - - @distributed_trace - def find_blobs_by_tags( - self, filter_expression, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> AsyncItemPaged[FilteredBlob] - """Returns a generator to list the blobs under the specified container whose tags - match the given search expression. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str filter_expression: - The expression to find blobs whose tags matches the specified condition. - eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" - :keyword int results_per_page: - The max result per page when paginating. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of FilteredBlob. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] - """ - results_per_page = kwargs.pop('results_per_page', None) - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.container.filter_blobs, - timeout=timeout, - where=filter_expression, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=FilteredBlobPaged) - - @distributed_trace_async - async def upload_blob( - self, name, # type: Union[str, BlobProperties] - data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] - length=None, # type: Optional[int] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs - ): - # type: (...) -> BlobClient - """Creates a new blob from a data source with automatic chunking. - - :param name: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type name: str or ~azure.storage.blob.BlobProperties - :param data: The blob data to upload. - :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be - either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :param metadata: - Name-value pairs associated with the blob as metadata. - :type metadata: dict(str, str) - :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. - If True, upload_blob will overwrite the existing data. If set to False, the - operation will fail with ResourceExistsError. 
The exception to the above is with Append - blob types: if set to False and the data already exists, an error will not be raised - and the data will be appended to the existing blob. If set overwrite=True, then the existing - append blob will be deleted, and a new one created. Defaults to False. - :keyword ~azure.storage.blob.ContentSettings content_settings: - ContentSettings object used to set blob properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the container has an active lease. Value can be a BlobLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: - A page blob tier value to set the blob to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: - A standard blob tier value to set the blob to. For this version of the library, - this is only applicable to block blobs on standard storage accounts. - :keyword int maxsize_condition: - Optional conditional header. The max length in bytes permitted for - the append blob. 
If the Append Block operation would cause the blob - to exceed that limit or if the blob size is already greater than the - value specified in this header, the request will fail with - MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). - :keyword int max_concurrency: - Maximum number of parallel connections to use when the blob size exceeds - 64MB. - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword str encryption_scope: - A predefined encryption scope used to encrypt the data on the service. An encryption - scope can be created using the Management API and referenced here by name. If a default - encryption scope has been defined at the container, this value will override it if the - container-level scope is configured to allow overrides. Otherwise an error will be raised. - - .. versionadded:: 12.2.0 - - :keyword str encoding: - Defaults to UTF-8. - :returns: A BlobClient to interact with the newly uploaded blob. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START upload_blob_to_container] - :end-before: [END upload_blob_to_container] - :language: python - :dedent: 12 - :caption: Upload blob to the container. - """ - blob = self.get_blob_client(name) - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - await blob.upload_blob( - data, - blob_type=blob_type, - length=length, - metadata=metadata, - timeout=timeout, - encoding=encoding, - **kwargs - ) - return blob - - @distributed_trace_async - async def delete_blob( - self, blob, # type: Union[str, BlobProperties] - delete_snapshots=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> None - """Marks the specified blob or snapshot for deletion. - - The blob is later deleted during garbage collection. - Note that in order to delete a blob, you must delete all of its - snapshots. You can delete both at the same time with the delete_blob - operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot - and retains the blob or snapshot for specified number of days. - After specified number of days, blob's data is removed from the service during garbage collection. - Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` - option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str delete_snapshots: - Required if the blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to delete. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword lease: - Required if the blob has an active lease. Value can be a Lease object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - blob = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - timeout = kwargs.pop('timeout', None) - await blob.delete_blob( # type: ignore - delete_snapshots=delete_snapshots, - timeout=timeout, - **kwargs) - - @distributed_trace_async - async def download_blob(self, blob, offset=None, length=None, **kwargs): - # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a blob to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the blob into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param blob: The blob with which to interact. If specified, this value will override - a blob value specified in the blob URL. - :type blob: str or ~azure.storage.blob.BlobProperties - :param int offset: - Start of byte range to use for downloading a section of the blob. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword str version_id: - The version id parameter is an opaque DateTime - value that, when present, specifies the version of the blob to download. - - .. versionadded:: 12.4.0 - This keyword argument was introduced in API version '2019-12-12'. - - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the blob. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the blob has an active lease. 
If specified, download_blob only - succeeds if the blob's lease is active and matches this ID. Value can be a - BlobLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - As the encryption key itself is provided in the request, - a secure connection must be established to transfer the key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword str encoding: - Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object. (StorageStreamDownloader) - :rtype: ~azure.storage.blob.aio.StorageStreamDownloader - """ - blob_client = self.get_blob_client(blob) # type: ignore - kwargs.setdefault('merge_span', True) - return await blob_client.download_blob( - offset=offset, - length=length, - **kwargs) - - @distributed_trace_async - async def delete_blobs( # pylint: disable=arguments-differ - self, *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Marks the specified blobs or snapshots for deletion. - - The blobs are later deleted during garbage collection. - Note that in order to delete blobs, you must delete all of their - snapshots. You can delete both at the same time with the delete_blobs operation. - - If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots - and retains the blobs or snapshots for specified number of days. - After specified number of days, blobs' data is removed from the service during garbage collection. - Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` - Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` - - The maximum number of blobs that can be deleted in a single request is 256. 
- - :param blobs: - The blobs to delete. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - - blob name: - key: 'name', value type: str - snapshot you want to delete: - key: 'snapshot', value type: str - whether to delete snapthots when deleting blob: - key: 'delete_snapshots', value: 'include' or 'only' - if the blob modified or not: - key: 'if_modified_since', 'if_unmodified_since', value type: datetime - etag: - key: 'etag', value type: str - match the etag or not: - key: 'match_condition', value type: MatchConditions - tags match condition: - key: 'if_tags_match_condition', value type: str - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword str delete_snapshots: - Required if a blob has associated snapshots. Values include: - - "only": Deletes only the blobs snapshots. - - "include": Deletes the blob along with all snapshots. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common_async.py - :start-after: [START delete_multiple_blobs] - :end-before: [END delete_multiple_blobs] - :language: python - :dedent: 12 - :caption: Deleting multiple blobs. - """ - if len(blobs) == 0: - return iter(list()) - - reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_standard_blob_tier_blobs( - self, - standard_blob_tier: Union[str, 'StandardBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """This operation sets the tier on block blobs. - - A block blob's tier determines Hot/Cool/Archive storage type. - This operation does not update the blob's ETag. - - The maximum number of blobs that can be updated in a single request is 256. 
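# A minimal sketch (blob names are placeholders) of moving several block blobs to the
# Archive tier in one batch request; a maximum of 256 blobs can be updated per call.
async def archive_blobs(container, blob_names):
    responses = await container.set_standard_blob_tier_blobs(
        'Archive',
        *blob_names,
        raise_on_any_failure=False)
    async for response in responses:
        print(response.status_code)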
- - :param standard_blob_tier: - Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', - 'Archive'. The hot tier is optimized for storing data that is accessed - frequently. The cool storage tier is optimized for storing data that - is infrequently accessed and stored for at least a month. The archive - tier is optimized for storing data that is rarely accessed and stored - for at least six months with flexible latency requirements. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier - :param blobs: - The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. - blob name: - key: 'name', value type: str - standard blob tier: - key: 'blob_tier', value type: StandardBlobTier - rehydrate priority: - key: 'rehydrate_priority', value type: RehydratePriority - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - tags match condition: - key: 'if_tags_match_condition', value type: str - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: - Indicates the priority with which to rehydrate an archived blob - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - @distributed_trace - async def set_premium_page_blob_tier_blobs( - self, - premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], - *blobs: List[Union[str, BlobProperties, dict]], - **kwargs - ) -> AsyncIterator[AsyncHttpResponse]: - """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. - - The maximum number of blobs that can be updated in a single request is 256. - - :param premium_page_blob_tier: - A page blob tier value to set on all blobs to. The tier correlates to the size of the - blob and number of allowed IOPS. This is only applicable to page blobs on - premium storage accounts. - - .. note:: - If you want to set different tier on different blobs please set this positional parameter to None. - Then the blob tier on every BlobProperties will be taken. - - :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier - :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can - be supplied, where each value is either the name of the blob (str) or BlobProperties. - - .. note:: - When the blob type is dict, here's a list of keys, value rules. 
- - blob name: - key: 'name', value type: str - premium blob tier: - key: 'blob_tier', value type: PremiumPageBlobTier - lease: - key: 'lease_id', value type: Union[str, LeaseClient] - timeout for subrequest: - key: 'timeout', value type: int - - :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :keyword bool raise_on_any_failure: - This is a boolean param which defaults to True. When this is set, an exception - is raised even if there is a single operation failure. For optimal performance, - this should be set to False. - :return: An async iterator of responses, one for each blob in order - :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] - """ - reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) - - return await self._batch_send(*reqs, **options) - - def get_blob_client( - self, blob, # type: Union[BlobProperties, str] - snapshot=None # type: str - ): - # type: (...) -> BlobClient - """Get a client to interact with the specified blob. - - The blob need not already exist. - - :param blob: - The blob with which to interact. - :type blob: str or ~azure.storage.blob.BlobProperties - :param str snapshot: - The optional blob snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`~BlobClient.create_snapshot()`. - :returns: A BlobClient. - :rtype: ~azure.storage.blob.aio.BlobClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_containers_async.py - :start-after: [START get_blob_client] - :end-before: [END get_blob_client] - :language: python - :dedent: 12 - :caption: Get the blob client. - """ - blob_name = _get_blob_name(blob) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return BlobClient( - self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, - credential=self.credential, api_version=self.api_version, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/_download_async.py deleted file mode 100644 index 135fd66..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_download_async.py +++ /dev/null @@ -1,547 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings -from typing import AsyncIterator - -from aiohttp import ClientPayloadError -from azure.core.exceptions import HttpResponseError, ServiceResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._deserialize import get_page_ranges_result -from .._download import process_range_and_offset, _ChunkDownloader - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - content = data.response.body() - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options) - - # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. - # Do optimize and create empty chunk locally if condition is met. 
- if self._do_optimize(download_range[0], download_range[1]): - chunk_data = b"\x00" * self.chunk_size - else: - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - retry_active = True - retry_total = 3 - while retry_active: - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - retry_active = False - - except HttpResponseError as error: - process_storage_error(error) - except ClientPayloadError as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - await asyncio.sleep(1) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - - - # This makes sure that if_match is set so that we can validate - # that subsequent downloads are to an unmodified blob - if self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = response.properties.etag - - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += await self._iter_downloader.yield_chunk(chunk) - except StopIteration: - self._complete = True - # it's likely that there some data left in self._current_content - if self._current_content: - return self._current_content - raise StopAsyncIteration("Download complete") - - return self._get_chunk_data() - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the blob being downloaded. - :ivar str container: - The name of the container where the blob is. - :ivar ~azure.storage.blob.BlobProperties properties: - The properties of the blob being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the blob. 
- """ - - def __init__( - self, - clients=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - container=None, - encoding=None, - **kwargs - ): - self.name = name - self.container = container - self.properties = None - self.size = None - - self._clients = clients - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._non_empty_ranges = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.container = self.container - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - retry_active = True - retry_total = 3 - while retry_active: - try: - location_mode, response = await self._clients.blob.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - retry_active = False - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._clients.blob.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - retry_active = False - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - except ClientPayloadError as error: - retry_total -= 1 - if retry_total <= 0: - raise ServiceResponseError(error, error=error) - await asyncio.sleep(1) - - # get page ranges to optimize downloading sparse page blob - if response.properties.blob_type == 'PageBlob': - try: - page_ranges = await self._clients.page_blob.get_page_ranges() - self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] - except HttpResponseError: - pass - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size != self.size: - if self._request_options.get('modified_access_conditions'): - self._request_options['modified_access_conditions'].if_match = response.properties.etag - else: - self._download_complete = True - return response - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_hello_world_async.py - :start-after: [START download_a_blob_in_chunk] - :end-before: [END download_a_blob_in_chunk] - :language: python - :dedent: 16 - :caption: Download a blob using chunks(). - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - async def readall(self): - """Download the contents of this blob. - - This operation is blocking until all data is downloaded. 
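For reference, the `chunks()` pager defined above is what callers drive when they stream a download through the public async client. A minimal consumption sketch (placeholder container/blob names, hypothetical connection string):

import asyncio
from azure.storage.blob.aio import BlobClient

async def stream_blob(conn_str):
    blob = BlobClient.from_connection_string(conn_str, "mycontainer", "myblob")
    async with blob:
        downloader = await blob.download_blob()
        total = 0
        # Each chunk is a bytes object no larger than max_chunk_get_size.
        async for chunk in downloader.chunks():
            total += len(chunk)
        return total

# asyncio.run(stream_blob("<connection-string>"))
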
- :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this blob, and decode as text. - - This operation is blocking until all data is downloaded. - - :param int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._clients.blob, - non_empty_ranges=self._non_empty_ranges, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - for task in done: - task.result() - except HttpResponseError as error: - process_storage_error(error) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - done, _running_futures = await asyncio.wait(running_futures) - 
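# The final wait drains any chunk downloads still in flight once the chunk
# generator is exhausted; task.result() below re-raises failures so they are
# reported through process_storage_error rather than dropped silently.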
try: - for task in done: - task.result() - except HttpResponseError as error: - process_storage_error(error) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this blob to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :param int max_concurrency: - The number of parallel connections with which to download. - :returns: The properties of the downloaded blob. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/_lease_async.py deleted file mode 100644 index 79e6733..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_lease_async.py +++ /dev/null @@ -1,325 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._serialize import get_modify_conditions -from .._lease import BlobLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - from .._generated.operations import BlobOperations, ContainerOperations - BlobClient = TypeVar("BlobClient") - ContainerClient = TypeVar("ContainerClient") - - -class BlobLeaseClient(LeaseClientBase): - """Creates a new BlobLeaseClient. - - This client provides lease operations on a BlobClient or ContainerClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the blob or container to lease. - :type client: ~azure.storage.blob.aio.BlobClient or - ~azure.storage.blob.aio.ContainerClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Any) -> None - """Requests a new lease. 
- - If the container does not have an active lease, the Blob service creates a - lease on the container and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the container or blob. Note that - the lease may be renewed even if it has expired as long as the container - or blob has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
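In practice `acquire`/`renew`/`release` are driven through the blob client rather than by constructing the lease client directly. A usage sketch against the public azure-storage-blob aio API (placeholder names, hypothetical connection string); exiting the `async with` block releases the lease, per the `__aexit__` shown above:

import asyncio
from azure.storage.blob.aio import BlobClient

async def hold_lease(conn_str):
    blob = BlobClient.from_connection_string(conn_str, "mycontainer", "myblob")
    async with blob:
        # acquire_lease() returns a BlobLeaseClient; leaving the block releases it.
        async with await blob.acquire_lease(lease_duration=15) as lease:
            props = await blob.get_blob_properties(lease=lease)
            print(props.lease.state)        # 'leased' while the lease is held

# asyncio.run(hold_lease("<connection-string>"))
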
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the container or blob. Releasing the lease allows another client - to immediately acquire the lease for the container or blob as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. 
The Blob service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the container or blob has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the container or blob. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str if_tags_match_condition: - Specify a SQL where clause on blob tags to operate only on blob with a matching value. - eg. ``\"\\\"tagname\\\"='my tag'\"`` - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - mod_conditions = get_modify_conditions(kwargs) - try: - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - break_period=lease_break_period, - modified_access_conditions=mod_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/_list_blobs_helper.py deleted file mode 100644 index 65acea1..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_list_blobs_helper.py +++ /dev/null @@ -1,171 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -try: - from urllib.parse import unquote -except ImportError: - from urllib import unquote -from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from .._deserialize import get_blob_properties_from_generated_code -from .._models import BlobProperties -from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - - -class BlobPropertiesPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - :param str container: The container that the blobs are listed from. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. 
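`BlobPropertiesPaged` is the engine behind `ContainerClient.list_blobs()` in the async client; callers never build it by hand. A consumption sketch (placeholder names, hypothetical connection string):

import asyncio
from azure.storage.blob.aio import ContainerClient

async def list_names(conn_str):
    container = ContainerClient.from_connection_string(conn_str, "mycontainer")
    async with container:
        # The AsyncItemPaged returned here is driven by BlobPropertiesPaged.
        return [b.name async for b in container.list_blobs(name_starts_with="logs/")]

# asyncio.run(list_names("<connection-string>"))

`walk_blobs()` uses the same machinery plus `BlobPrefix` (below) to expose a hierarchical, delimiter-based view of the same listing.
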
- :param str continuation_token: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(BlobPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobProperties): - return item - if isinstance(item, BlobItemInternal): - blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access - blob.container = self.container - return blob - return item - - -class BlobPrefix(AsyncItemPaged, DictMixin): - """An Iterable of Blob properties. - - Returned from walk_blobs when a delimiter is used. - Can be thought of as a virtual blob directory. - - :ivar str name: The prefix, or "directory name" of the blob. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token of the current page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.BlobProperties) - :ivar str container: The container that the blobs are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only blobs whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str marker: An opaque continuation token. - :param str delimiter: - Used to capture blobs whose names begin with the same substring up to - the appearance of the delimiter character. 
The delimiter may be a single - character or a string. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. - """ - def __init__(self, *args, **kwargs): - super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) - self.name = kwargs.get('prefix') - self.prefix = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.container = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class BlobPrefixPaged(BlobPropertiesPaged): - def __init__(self, *args, **kwargs): - super(BlobPrefixPaged, self).__init__(*args, **kwargs) - self.name = self.prefix - - async def _extract_data_cb(self, get_next_return): - continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return continuation_token, self.current_page - - def _build_item(self, item): - item = super(BlobPrefixPaged, self)._build_item(item) - if isinstance(item, GenBlobPrefix): - if item.name.encoded: - name = unquote(item.name.content) - else: - name = item.name.content - return BlobPrefix( - self._command, - container=self.container, - prefix=name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_models.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/_models.py deleted file mode 100644 index 05edd78..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_models.py +++ /dev/null @@ -1,143 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator -from azure.core.exceptions import HttpResponseError -from .._deserialize import parse_tags - -from .._models import ContainerProperties, FilteredBlob -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error - -from .._generated.models import FilterBlobItem - - -class ContainerPropertiesPaged(AsyncPageIterator): - """An Iterable of Container properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A container name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only containers whose names - begin with the specified prefix. 
- :param int results_per_page: The maximum number of container names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(ContainerPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [self._build_item(item) for item in self._response.container_items] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - return ContainerProperties._from_generated(item) # pylint: disable=protected-access - - -class FilteredBlobPaged(AsyncPageIterator): - """An Iterable of Blob properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A blob name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.blob.BlobProperties) - :ivar str container: The container that the blobs are listed from. - - :param callable command: Function to retrieve the next page of items. - :param str container: The name of the container. - :param int results_per_page: The maximum number of blobs to retrieve per - call. - :param str continuation_token: An opaque continuation token. - :param location_mode: Specifies the location the request should be sent to. - This mode only applies for RA-GRS accounts which allow secondary read access. - Options include 'primary' or 'secondary'. 
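`FilteredBlobPaged` similarly backs `BlobServiceClient.find_blobs_by_tags()`; the filter expression uses the same SQL-like tag syntax quoted in the lease docstrings earlier. A sketch (placeholder tag name and value, hypothetical connection string):

import asyncio
from azure.storage.blob.aio import BlobServiceClient

async def tagged_blobs(conn_str):
    service = BlobServiceClient.from_connection_string(conn_str)
    async with service:
        # FilteredBlobPaged drives this pager; items are FilteredBlob objects.
        return [b.name async for b in service.find_blobs_by_tags("\"env\"='prod'")]

# asyncio.run(tagged_blobs("<connection-string>"))
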
- """ - def __init__( - self, command, - container=None, - results_per_page=None, - continuation_token=None, - location_mode=None): - super(FilteredBlobPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.marker = continuation_token - self.results_per_page = results_per_page - self.container = container - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.marker = self._response.next_marker - self.current_page = [self._build_item(item) for item in self._response.blobs] - - return self._response.next_marker or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, FilterBlobItem): - tags = parse_tags(item.tags) - blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) - return blob - return item diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2021_04_10/aio/_upload_helpers.py deleted file mode 100644 index 985e731..0000000 --- a/azure/multiapi/storagev2/blob/v2021_04_10/aio/_upload_helpers.py +++ /dev/null @@ -1,281 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from io import SEEK_SET, UnsupportedOperation -from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.exceptions import ResourceModifiedError, HttpResponseError - -from .._shared.response_handlers import ( - process_storage_error, - return_response_headers) -from .._shared.uploads_async import ( - upload_data_chunks, - upload_substream_blocks, - BlockBlobChunkUploader, - PageBlobChunkUploader, - AppendBlobChunkUploader) -from .._shared.encryption import generate_blob_encryption_data, encrypt_blob -from .._generated.models import ( - BlockLookupList, - AppendPositionAccessConditions, - ModifiedAccessConditions, -) -from .._upload_helpers import _convert_mod_error, _any_conditions - -if TYPE_CHECKING: - from datetime import datetime # pylint: disable=unused-import - BlobLeaseClient = TypeVar("BlobLeaseClient") - - -async def upload_block_blob( # pylint: disable=too-many-locals - client=None, - data=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - adjusted_count = length - if (encryption_options.get('key') is not None) and (adjusted_count is not None): - adjusted_count += (16 - (length % 16)) - blob_headers = kwargs.pop('blob_headers', None) - tier = kwargs.pop('standard_blob_tier', None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - immutability_policy = kwargs.pop('immutability_policy', None) - immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time - immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode - legal_hold = kwargs.pop('legal_hold', None) - - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): - try: - data = data.read(length) - if not isinstance(data, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - except AttributeError: - pass - if encryption_options.get('key'): - encryption_data, data = encrypt_blob(data, encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - return await client.upload( - body=data, - content_length=adjusted_count, - blob_http_headers=blob_headers, - headers=headers, - cls=return_response_headers, - validate_content=validate_content, - data_stream_total=adjusted_count, - upload_stream_current=0, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - immutability_policy_expiry=immutability_policy_expiry, - immutability_policy_mode=immutability_policy_mode, - legal_hold=legal_hold, - **kwargs) - - use_original_upload_path = blob_settings.use_byte_buffer or \ - validate_content or encryption_options.get('required') or \ - blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - if encryption_options.get('key'): - cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) - headers['x-ms-meta-encryptiondata'] = encryption_data - 
encryption_options['cek'] = cek - encryption_options['vector'] = iv - block_ids = await upload_data_chunks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs - ) - else: - block_ids = await upload_substream_blocks( - service=client, - uploader_class=BlockBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - headers=headers, - **kwargs - ) - - block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) - block_lookup.latest = block_ids - return await client.commit_block_list( - block_lookup, - blob_http_headers=blob_headers, - cls=return_response_headers, - validate_content=validate_content, - headers=headers, - tier=tier.value if tier else None, - blob_tags_string=blob_tags_string, - immutability_policy_expiry=immutability_policy_expiry, - immutability_policy_mode=immutability_policy_mode, - legal_hold=legal_hold, - **kwargs) - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_page_blob( - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if not overwrite and not _any_conditions(**kwargs): - kwargs['modified_access_conditions'].if_none_match = '*' - if length is None or length < 0: - raise ValueError("A content length must be specified for a Page Blob.") - if length % 512 != 0: - raise ValueError("Invalid page blob size: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(length)) - if kwargs.get('premium_page_blob_tier'): - premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') - try: - headers['x-ms-access-tier'] = premium_page_blob_tier.value - except AttributeError: - headers['x-ms-access-tier'] = premium_page_blob_tier - if encryption_options and encryption_options.get('data'): - headers['x-ms-meta-encryptiondata'] = encryption_options['data'] - blob_tags_string = kwargs.pop('blob_tags_string', None) - - response = await client.create( - content_length=0, - blob_content_length=length, - blob_sequence_number=None, - blob_http_headers=kwargs.pop('blob_headers', None), - blob_tags_string=blob_tags_string, - cls=return_response_headers, - headers=headers, - **kwargs) - if length == 0: - return response - - kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) - return await upload_data_chunks( - service=client, - uploader_class=PageBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_page_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - encryption_options=encryption_options, - headers=headers, - **kwargs) - - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceModifiedError as mod_error: - if not overwrite: - _convert_mod_error(mod_error) - raise - - -async def upload_append_blob( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - headers=None, - validate_content=None, - max_concurrency=None, - blob_settings=None, - encryption_options=None, - **kwargs): - try: - if length == 0: - return {} - blob_headers = kwargs.pop('blob_headers', None) - append_conditions = AppendPositionAccessConditions( - max_size=kwargs.pop('maxsize_condition', None), - append_position=None) - blob_tags_string = kwargs.pop('blob_tags_string', None) - - try: - if overwrite: - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - if error.response.status_code != 404: - raise - # rewind the request body if it is a stream - if hasattr(stream, 'read'): - try: - # attempt to rewind the body to the initial position - stream.seek(0, SEEK_SET) - except UnsupportedOperation: - # if body is not seekable, then retry would not work - raise error - await client.create( - content_length=0, - blob_http_headers=blob_headers, - headers=headers, - blob_tags_string=blob_tags_string, - **kwargs) - return await upload_data_chunks( - service=client, - uploader_class=AppendBlobChunkUploader, - total_size=length, - chunk_size=blob_settings.max_block_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - append_position_access_conditions=append_conditions, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2021_04_10/py.typed b/azure/multiapi/storagev2/blob/v2021_04_10/py.typed deleted file mode 100644 index e69de29..0000000 diff --git 
a/azure/multiapi/storagev2/blob/v2021_08_06/py.typed b/azure/multiapi/storagev2/blob/v2021_08_06/py.typed deleted file mode 100644 index e69de29..0000000 diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_lease.py deleted file mode 100644 index 3643601..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_lease.py +++ /dev/null @@ -1,245 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2019_07_07 import BlobLeaseClient - - -if TYPE_CHECKING: - from datetime import datetime - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(object): - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.FileSystemClient or - ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. 
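`DataLakeLeaseClient` is a thin wrapper that forwards every call to a `BlobLeaseClient` on the paired blob endpoint and then copies `id`/`etag`/`last_modified` back. A synchronous usage sketch; it goes through the standard `azure.storage.filedatalake` package, which this vendored module mirrors, with placeholder filesystem and path names:

from azure.storage.filedatalake import DataLakeFileClient, DataLakeLeaseClient

def lock_path(conn_str):
    file_client = DataLakeFileClient.from_connection_string(
        conn_str, file_system_name="myfs", file_path="dir/data.csv")
    # The context manager releases the lease on exit (see __exit__ above).
    with DataLakeLeaseClient(file_client) as lease:
        lease.acquire(lease_duration=15)
        print(lease.id, lease.etag)
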
- - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. 
Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. 
- When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) - - def _update_lease_client_attributes(self): - self.id = self._blob_lease_client.id # type: str - self.last_modified = self._blob_lease_client.last_modified # type: datetime - self.etag = self._blob_lease_client.etag # type: str diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_download.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_download.py deleted file mode 100644 index 181b503..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_download.py +++ /dev/null @@ -1,53 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._models import FileProperties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. 
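The Data Lake `StorageStreamDownloader` here is only a thin facade over the blob downloader, re-labelling the properties as `FileProperties`. Typical use goes through `DataLakeFileClient.download_file()` (placeholder names, hypothetical connection string):

from io import BytesIO
from azure.storage.filedatalake import DataLakeFileClient

def read_file(conn_str):
    file_client = DataLakeFileClient.from_connection_string(
        conn_str, file_system_name="myfs", file_path="dir/data.csv")
    downloader = file_client.download_file()      # returns this wrapper
    buf = BytesIO()
    downloader.readinto(buf)                      # or: data = downloader.readall()
    return buf.getvalue()
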
- """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = FileProperties._from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - return self._downloader.chunks() - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return self._downloader.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_serialize.py deleted file mode 100644 index eba593d..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_serialize.py +++ /dev/null @@ -1,81 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from azure.multiapi.storagev2.blob.v2019_07_07._serialize import _get_match_headers # pylint: disable=protected-access -from ._shared import encode_base64 -from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \ - SourceModifiedAccessConditions, LeaseAccessConditions - - -def convert_dfs_url_to_blob_url(dfs_account_url): - return dfs_account_url.replace('.dfs.', '.blob.', 1) - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> str - headers = list() - if metadata: - for key, value in metadata.items(): - headers.append(key + '=') - headers.append(encode_base64(value)) - headers.append(',') - - if headers: - del headers[-1] - - return ''.join(headers) - - -def get_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None) - ) - - -def get_source_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_path_http_headers(content_settings): - path_headers = PathHTTPHeaders( - cache_control=content_settings.cache_control, - content_type=content_settings.content_type, - content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - 
content_encoding=content_settings.content_encoding, - content_language=content_settings.content_language, - content_disposition=content_settings.content_disposition - ) - return path_headers - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_lease_id(lease): - if not lease: - return "" - try: - lease_id = lease.id - except AttributeError: - lease_id = lease - return lease_id diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. 
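The deleted ``_shared/__init__.py`` above provides the base64 and HMAC-SHA256 helpers used when signing requests. An equivalent standard-library sketch of the same computation; the sample key and string-to-sign are fabricated.

.. code-block:: python

    # Standard-library equivalent of the deleted encode_base64/sign_string
    # helpers above. The key and payload are fabricated examples.
    import base64
    import hashlib
    import hmac


    def encode_base64(data: bytes) -> str:
        return base64.b64encode(data).decode("utf-8")


    def sign_string(key: str, string_to_sign: str, key_is_base64: bool = True) -> str:
        key_bytes = base64.b64decode(key) if key_is_base64 else key.encode("utf-8")
        digest = hmac.new(key_bytes, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
        return encode_base64(digest)


    fake_key = base64.b64encode(b"not-a-real-account-key").decode("utf-8")
    print(sign_string(fake_key, "GET\n\n\n..."))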
- ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. 
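The deleted ``_generate_encryption_data_dict`` / ``_dict_to_encryption_data`` pair above round-trips the client-side encryption envelope stored alongside the data. A sketch of the JSON shape they produce and consume; every value below is a fabricated placeholder.

.. code-block:: python

    # Illustrative shape of the encryption metadata handled by the helpers
    # above. All values are placeholders, not output of this library.
    import json

    encryption_data = {
        "WrappedContentKey": {
            "KeyId": "my-kek-id",                    # kek.get_kid()
            "EncryptedKey": "<base64 wrapped CEK>",  # kek.wrap_key(cek)
            "Algorithm": "<key wrap algorithm>",     # kek.get_key_wrap_algorithm()
        },
        "EncryptionAgent": {
            "Protocol": "1.0",
            "EncryptionAlgorithm": "AES_CBC_256",
        },
        "ContentEncryptionIV": "<base64 16-byte IV>",
        "KeyWrappingMetadata": {"EncryptionLibrary": "Python <version>"},
    }
    print(json.dumps(encryption_data, indent=2))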
- :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. 
- :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. 
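``encrypt_blob`` and ``_decrypt_message`` above use AES-256-CBC with PKCS7 padding from the ``cryptography`` package. A self-contained roundtrip sketch of just that cipher layer, with key wrapping deliberately left out and a random key and IV generated on the spot.

.. code-block:: python

    # Standalone AES-256-CBC + PKCS7 roundtrip mirroring the cipher layer of
    # the deleted encrypt_blob/_decrypt_message helpers. No KEK wrapping here.
    from os import urandom

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher
    from cryptography.hazmat.primitives.ciphers.algorithms import AES
    from cryptography.hazmat.primitives.ciphers.modes import CBC
    from cryptography.hazmat.primitives.padding import PKCS7

    cek = urandom(32)  # 256-bit content encryption key
    iv = urandom(16)   # AES block-sized IV
    cipher = Cipher(AES(cek), CBC(iv), backend=default_backend())

    padder = PKCS7(128).padder()
    padded = padder.update(b"hello datalake") + padder.finalize()
    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(padded) + encryptor.finalize()

    decryptor = cipher.decryptor()
    plain_padded = decryptor.update(ciphertext) + decryptor.finalize()
    unpadder = PKCS7(128).unpadder()
    assert unpadder.update(plain_padded) + unpadder.finalize() == b"hello datalake"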
- :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - 
Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. 
- if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/policies.py deleted file mode 100644 index b4a2f9e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? 
(Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. - if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. 
" - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. - - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. 
- if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. - _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += 
int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. - """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
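``StorageContentValidation.get_content_md5`` above computes an MD5 digest over bytes or a seekable stream so a ``Content-MD5`` header can be attached to uploads. A standard-library sketch of the same computation; the payloads are fabricated.

.. code-block:: python

    # Base64-encoded Content-MD5 of a request body, mirroring the deleted
    # StorageContentValidation.get_content_md5 helper above.
    import base64
    import hashlib
    from io import SEEK_SET, BytesIO


    def get_content_md5(data) -> bytes:
        md5 = hashlib.md5()
        if isinstance(data, bytes):
            md5.update(data)
        elif hasattr(data, "read"):
            pos = data.tell()
            for chunk in iter(lambda: data.read(4096), b""):
                md5.update(chunk)
            data.seek(pos, SEEK_SET)
        else:
            raise ValueError("Data should be bytes or a seekable file-like object.")
        return md5.digest()


    print(base64.b64encode(get_content_md5(b"payload")).decode("utf-8"))
    print(base64.b64encode(get_content_md5(BytesIO(b"payload"))).decode("utf-8"))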
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
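The ``ExponentialRetry`` docstring above spells out the back-off formula: ``initial_backoff + increment_base**retry_count``, jittered by ``random_jitter_range``, so the defaults wait roughly 15, 18 and 24 seconds. A sketch of that computation on its own:

.. code-block:: python

    # Back-off computation as described in the deleted ExponentialRetry
    # docstring above: initial_backoff + increment_base**count, jittered
    # within +/- random_jitter_range and floored at zero.
    import random


    def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        low = backoff - jitter if backoff > jitter else 0
        return random.uniform(low, backoff + jitter)


    for attempt in range(3):
        print(attempt, round(exponential_backoff(attempt), 1))
    # Without jitter the waits would be 15, 18 (15 + 3**1) and 24 (15 + 3**2).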
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/response_handlers.py deleted file mode 100644 index ac526e5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/response_handlers.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. - """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.location_mode, deserialized - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, 
- StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/shared_access_signature.py deleted file mode 100644 index 367c655..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/shared_access_signature.py +++ /dev/null @@ -1,209 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. 
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - 
get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/uploads.py deleted file mode 100644 index ceeeebe..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/uploads.py +++ /dev/null @@ -1,566 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
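The ``uploads.py`` module being removed here schedules chunk uploads with a bounded pool of worker futures: ``upload_data_chunks`` seeds a ``ThreadPoolExecutor`` with at most ``max_concurrency`` chunk futures, and ``_parallel_uploads`` waits for the first completion and tops the pool back up from the chunk generator, so only a bounded number of chunks is ever buffered. A minimal, self-contained sketch of that scheduling pattern follows; the ``chunk_stream`` helper and ``upload_chunk`` stub are illustrative stand-ins, not SDK APIs::

    from concurrent import futures
    from io import BytesIO
    from itertools import islice

    def chunk_stream(stream, chunk_size):
        """Yield (offset, data) pairs until the stream is exhausted."""
        offset = 0
        while True:
            data = stream.read(chunk_size)
            if not data:
                break
            yield offset, data
            offset += len(data)

    def upload_chunk(chunk):
        offset, data = chunk
        # A real uploader would call the service here (e.g. stage a block or upload a page range).
        return offset, len(data)

    def upload_in_parallel(stream, chunk_size=4 * 1024 * 1024, max_concurrency=4):
        pending = chunk_stream(stream, chunk_size)
        results = []
        with futures.ThreadPoolExecutor(max_concurrency) as executor:
            # Seed the pool with at most max_concurrency chunks.
            running = {executor.submit(upload_chunk, c) for c in islice(pending, max_concurrency)}
            while running:
                # As uploads finish, record their results and refill the pool
                # from the generator so memory use stays bounded.
                done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
                results.extend(f.result() for f in done)
                for _ in done:
                    try:
                        running.add(executor.submit(upload_chunk, next(pending)))
                    except StopIteration:
                        break
        # Return results ordered by chunk offset, mirroring how the deleted
        # helper sorts range ids before committing them.
        return [size for _, size in sorted(results)]

    if __name__ == "__main__":
        print(upload_in_parallel(BytesIO(b"x" * (10 * 1024 * 1024))))
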
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - next_chunk = next(pending) - except StopIteration: - break - else: - running.add(executor.submit(with_current_context(uploader), next_chunk)) - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - 
self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, block_id, block_stream): - try: - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, 
- length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. 
- if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def next(self): - return next(self.iterator) - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.next() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/uploads_async.py deleted file mode 100644 index a918d7d..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/uploads_async.py +++ /dev/null @@ -1,367 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - next_chunk = next(pending) - except StopIteration: - break - else: - running.add(asyncio.ensure_future(uploader(next_chunk))) - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, block_id, block_stream): - range_id = await self._upload_substream_block(block_id, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, block_id, block_stream): - try: - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, 
chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - chunk_end, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_upload_helper.py deleted file mode 100644 index bf29cfc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_upload_helper.py +++ /dev/null @@ -1,86 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from ._deserialize import ( - process_storage_error) -from ._generated.models import ( - StorageErrorException, -) -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import ( - upload_data_chunks, - DataLakeFileChunkUploader) - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - 
modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=100 * 1024 * 1024, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - - return client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_version.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/_version.py deleted file mode 100644 index 44f2642..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.0.1" diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_directory_client_async.py deleted file mode 100644 index f655adc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_directory_client_async.py +++ /dev/null @@ -1,509 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ._data_lake_file_client_async import DataLakeFileClient -from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase -from .._models import DirectoryProperties -from ._path_client_async import PathClient - - -class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call - credential=credential, **kwargs) - - async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. 
admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return await self._create('directory', metadata=metadata, **kwargs) - - async def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return await self._delete(**kwargs) - - async def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - blob_properties = await self._get_path_properties(**kwargs) - return DirectoryProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access - - async def rename_directory(self, new_name, # type: str - **kwargs): - # type: (**Any) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - path = new_name[len(new_file_system):] - - new_directory_client = DataLakeDirectoryClient( - self.url, new_file_system, directory_name=path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_directory_client._rename_path('/' + self.file_system_name + '/' + self.path_name, # pylint: disable=protected-access - **kwargs) - return new_directory_client - - async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.delete_directory(**kwargs) - return subdir - - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. 
- :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_file_client] - :end-before: [END bsc_get_file_client] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. 
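# Illustrative sketch for create_file on the async directory client described
# above; the file name and the `directory_client` instance are assumptions.
async def create_file_example(directory_client):
    # create_file provisions the file and returns a DataLakeFileClient for it.
    file_client = await directory_client.create_file("report.txt")
    return file_client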
- """ - try: - file_path = file.name - except AttributeError: - file_path = self.path_name + '/' + file - - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The sub subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_directory_client] - :end-before: [END bsc_get_directory_client] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - subdir_path = sub_directory.name - except AttributeError: - subdir_path = self.path_name + '/' + sub_directory - - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_download_async.py deleted file mode 100644 index 2fda96f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_download_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from .._models import FileProperties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = FileProperties._from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - return self._downloader.chunks() - - async def readall(self): - """Download the contents of this file. 
- - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return await self._downloader.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_path_client_async.py deleted file mode 100644 index 514af0a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_path_client_async.py +++ /dev/null @@ -1,489 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobClient -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._path_client import PathClient as PathClientBase -from .._models import DirectoryProperties -from .._generated.aio import DataLakeStorageClient -from ._data_lake_lease_async import DataLakeLeaseClient -from .._generated.models import StorageErrorException -from .._deserialize import process_storage_error -from .._shared.policies_async import ExponentialRetry - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(AsyncStorageAccountHostsMixin, PathClientBase): - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - - super(PathClient, self).__init__(account_url, file_system_name, path_name, # type: ignore # pylint: disable=specify-parameter-names-in-call - credential=credential, - **kwargs) - - kwargs.pop('_hosts', None) - self._blob_client = BlobClient(self._blob_account_url, file_system_name, blob_name=path_name, - credential=credential, _hosts=self._blob_client._hosts, **kwargs) # type: ignore # pylint: disable=protected-access - self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._blob_client.close() - await super(PathClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_client.close() - await self.__aexit__() - - async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". 
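# Illustrative sketch for the StorageStreamDownloader wrapper above; the
# `downloader` argument is assumed to be an instance returned by a download
# call on the async DataLakeFileClient.
async def read_example(downloader):
    data = await downloader.readall()        # whole download as bytes (or str)
    print(downloader.name, downloader.size)  # path name and byte size of the stream
    return data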
Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.path.create(**options) - except StorageErrorException as error: - process_storage_error(error) - - async def _delete(self, **kwargs): - # type: (bool, **Any) -> None - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return await self._client.path.delete(**options) - except StorageErrorException as error: - process_storage_error(error) - - async def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). - """ - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return await self._client.path.set_access_control(**options) - except StorageErrorException as error: - process_storage_error(error) - - async def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Get the owner, group, permissions, or access control list for a path. - - :param upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. - """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return await self._client.path.get_properties(**options) - except StorageErrorException as error: - process_storage_error(error) - - async def _rename_path(self, rename_source, - **kwargs): - # type: (**Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. 
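# Illustrative sketch of the access-control calls documented above;
# `path_client` is assumed to be an async client for an existing path, and the
# permission string is an example value (permissions and acl are mutually
# exclusive, so only one is passed here).
async def access_control_example(path_client):
    await path_client.set_access_control(permissions="rwxr-x---")
    return await path_client.get_access_control(upn=True)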
- :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return await self._client.path.create(**options) - except StorageErrorException as error: - process_storage_error(error) - - async def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - """ - path_properties = await self._blob_client.get_blob_properties(**kwargs) - path_properties.__class__ = DirectoryProperties - return path_properties - - async def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - """ - return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - async def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. 
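# Illustrative sketch of set_metadata as documented above; `path_client` is an
# assumed async client for an existing path. Each call replaces all existing
# metadata, and set_http_headers(content_settings=...) updates HTTP properties
# in the same way.
async def metadata_example(path_client):
    updated = await path_client.set_metadata({"category": "test"})
    return updated  # property dict with etag and last-modified values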
- - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - async def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
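# Illustrative sketch of the lease flow described above; `path_client` is an
# assumed async client, and releasing through lease.release() assumes the
# standard DataLakeLeaseClient surface.
async def lease_example(path_client):
    lease = await path_client.acquire_lease(lease_duration=15)
    await lease.release()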
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_file_system_samples.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file_system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_upload_helper.py deleted file mode 100644 index b2f10df..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_upload_helper.py +++ /dev/null @@ -1,86 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from .._deserialize import ( - process_storage_error) -from .._generated.models import ( - StorageErrorException, -) -from .._shared.response_handlers import return_response_headers -from .._shared.uploads_async import ( - upload_data_chunks, - DataLakeFileChunkUploader) - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -async def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = await client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - await upload_data_chunks( - service=client, - 
uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=100 * 1024 * 1024, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - - return await client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/__init__.py similarity index 91% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/__init__.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/__init__.py index d5bc4c9..fe21dbf 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/__init__.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/__init__.py @@ -4,7 +4,6 @@ # license information. # -------------------------------------------------------------------------- -from ._download import StorageStreamDownloader from ._data_lake_file_client import DataLakeFileClient from ._data_lake_directory_client import DataLakeDirectoryClient from ._file_system_client import FileSystemClient @@ -16,7 +15,6 @@ FileSystemProperties, FileSystemPropertiesPaged, DirectoryProperties, - FileProperties, PathProperties, PathPropertiesPaged, LeaseProperties, @@ -26,8 +24,7 @@ DirectorySasPermissions, FileSasPermissions, UserDelegationKey, - PublicAccess, - AccessPolicy, + PublicAccess ) from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \ generate_file_sas @@ -48,14 +45,12 @@ 'LinearRetry', 'LocationMode', 'PublicAccess', - 'AccessPolicy', 'ResourceTypes', 'StorageErrorCode', 'UserDelegationKey', 'FileSystemProperties', 'FileSystemPropertiesPaged', 'DirectoryProperties', - 'FileProperties', 'PathProperties', 'PathPropertiesPaged', 'LeaseProperties', @@ -69,5 +64,4 @@ 'generate_directory_sas', 'generate_file_sas', 'VERSION', - 'StorageStreamDownloader' ] diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_directory_client.py similarity index 76% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_directory_client.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_directory_client.py index 90c525b..a7bed77 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_directory_client.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_directory_client.py @@ -37,12 +37,19 @@ class DataLakeDirectoryClient(PathClient): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] + .. literalinclude:: ../samples/test_datalake_authentication_samples.py + :start-after: [START create_datalake_service_client] + :end-before: [END create_datalake_service_client] :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. + :dedent: 8 + :caption: Creating the DataLakeServiceClient with account url and credential. + + .. 
literalinclude:: ../samples/test_datalake_authentication_samples.py + :start-after: [START create_datalake_service_client_oauth] + :end-before: [END create_datalake_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the DataLakeServiceClient with Azure Identity credentials. """ def __init__( self, account_url, # type: str @@ -68,11 +75,9 @@ def from_connection_string( :param str conn_str: A connection string to an Azure Storage account. - :param file_system_name: - The name of file system to interact with. + :param file_system_name: The name of file system to interact with. :type file_system_name: str - :param directory_name: - The name of directory to interact with. The directory is under file system. + :param directory_name: The name of directory to interact with. The directory is under file system. :type directory_name: str :param credential: The credentials with which to authenticate. This is optional if the @@ -83,41 +88,42 @@ def from_connection_string( :return a DataLakeDirectoryClient :rtype ~azure.storage.filedatalake.DataLakeDirectoryClient """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'dfs') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary return cls( account_url, file_system_name=file_system_name, directory_name=directory_name, credential=credential, **kwargs) - def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] + def create_directory(self, content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] **kwargs): # type: (...) -> Dict[str, Union[str, datetime]] """ Create a new directory. + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. 
+ :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -138,17 +144,8 @@ def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] :keyword int timeout: The timeout parameter is expressed in seconds. :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. """ - return self._create('directory', metadata=metadata, **kwargs) + return self._create('directory', content_settings=content_settings, metadata=metadata, **kwargs) def delete_directory(self, **kwargs): # type: (...) -> None @@ -156,9 +153,9 @@ def delete_directory(self, **kwargs): Marks the specified directory for deletion. :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -179,15 +176,6 @@ def delete_directory(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. """ return self._delete(**kwargs) @@ -199,7 +187,7 @@ def get_directory_properties(self, **kwargs): :keyword lease: Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -223,34 +211,47 @@ def get_directory_properties(self, **kwargs): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] + .. literalinclude:: ../tests/test_blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] :language: python - :dedent: 4 + :dedent: 8 :caption: Getting the properties for a file/directory. 
""" blob_properties = self._get_path_properties(**kwargs) return DirectoryProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access - def rename_directory(self, new_name, # type: str - **kwargs): + def rename_directory(self, rename_destination, **kwargs): # type: (**Any) -> DataLakeDirectoryClient """ Rename the source directory. - :param str new_name: - the new directory name the user want to rename to. + :param str rename_destination: the new directory name the user want to rename to. The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword source_lease: A lease ID for the source path. If specified, + the source path must have an active lease and the leaase ID must + match. + :keyword source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -288,24 +289,15 @@ def rename_directory(self, new_name, # type: str :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. 
""" - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - path = new_name[len(new_file_system):] + rename_destination = rename_destination.strip('/') + new_file_system = rename_destination.split('/')[0] + path = rename_destination[len(new_file_system):] new_directory_client = DataLakeDirectoryClient( self.url, new_file_system, directory_name=path, credential=self._raw_credential, _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, + _location_mode=self._location_mode, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) new_directory_client._rename_path('/'+self.file_system_name+'/'+self.path_name, # pylint: disable=protected-access @@ -313,6 +305,7 @@ def rename_directory(self, new_name, # type: str return new_directory_client def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + content_settings=None, # type: Optional[ContentSettings] metadata=None, # type: Optional[Dict[str, str]] **kwargs): # type: (...) -> DataLakeDirectoryClient @@ -323,30 +316,27 @@ def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties The directory with which to interact. This can either be the name of the directory, or an instance of DirectoryProperties. :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. 
Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -369,7 +359,7 @@ def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties :return: DataLakeDirectoryClient for the subdirectory. """ subdir = self.get_sub_directory_client(sub_directory) - subdir.create_directory(metadata=metadata, **kwargs) + subdir.create_directory(content_settings=content_settings, metadata=metadata, **kwargs) return subdir def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] @@ -383,9 +373,9 @@ def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties or an instance of DirectoryProperties. :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -424,27 +414,24 @@ def create_file(self, file, # type: Union[FileProperties, str] :keyword ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set path properties. :keyword metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. 
Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -483,6 +470,15 @@ def get_file_client(self, file # type: Union[FileProperties, str] :type file: str or ~azure.storage.filedatalake.FileProperties :returns: A DataLakeFileClient. :rtype: ~azure.storage.filedatalake..DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_file_client] + :end-before: [END bsc_get_file_client] + :language: python + :dedent: 12 + :caption: Getting the file client to interact with a specific file. """ try: file_path = file.name @@ -492,7 +488,7 @@ def get_file_client(self, file # type: Union[FileProperties, str] return DataLakeFileClient( self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, + _location_mode=self._location_mode, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) @@ -509,6 +505,15 @@ def get_sub_directory_client(self, sub_directory # type: Union[DirectoryPropert :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties :returns: A DataLakeDirectoryClient. :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_directory_client] + :end-before: [END bsc_get_directory_client] + :language: python + :dedent: 12 + :caption: Getting the directory client to interact with a specific directory. """ try: subdir_path = sub_directory.name @@ -518,6 +523,6 @@ def get_sub_directory_client(self, sub_directory # type: Union[DirectoryPropert return DataLakeDirectoryClient( self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, + _location_mode=self._location_mode, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_file_client.py similarity index 72% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_file_client.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_file_client.py index e5973e8..1679c60 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_file_client.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_file_client.py @@ -3,18 +3,14 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- -from io import BytesIO import six from ._shared.base_client import parse_connection_str from ._shared.request_handlers import get_length, read_length from ._shared.response_handlers import return_response_headers -from ._shared.uploads import IterStreamer -from ._upload_helper import upload_datalake_file from ._generated.models import StorageErrorException -from ._download import StorageStreamDownloader from ._path_client import PathClient -from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers +from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions from ._deserialize import process_storage_error from ._models import FileProperties @@ -45,12 +41,19 @@ class DataLakeFileClient(PathClient): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] + .. literalinclude:: ../samples/test_datalake_authentication_samples.py + :start-after: [START create_datalake_service_client] + :end-before: [END create_datalake_service_client] :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. + :dedent: 8 + :caption: Creating the DataLakeServiceClient with account url and credential. + + .. literalinclude:: ../samples/test_datalake_authentication_samples.py + :start-after: [START create_datalake_service_client_oauth] + :end-before: [END create_datalake_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the DataLakeServiceClient with Azure Identity credentials. """ def __init__( self, account_url, # type: str @@ -91,7 +94,9 @@ def from_connection_string( :return a DataLakeFileClient :rtype ~azure.storage.filedatalake.DataLakeFileClient """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'dfs') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary return cls( account_url, file_system_name=file_system_name, file_path=file_path, credential=credential, **kwargs) @@ -106,27 +111,24 @@ def create_file(self, content_settings=None, # type: Optional[ContentSettings] :param ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. 
For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -147,15 +149,6 @@ def create_file(self, content_settings=None, # type: Optional[ContentSettings] :keyword int timeout: The timeout parameter is expressed in seconds. :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. """ return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) @@ -165,9 +158,9 @@ def delete_file(self, **kwargs): Marks the specified file for deletion. :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -188,15 +181,6 @@ def delete_file(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. """ return self._delete(**kwargs) @@ -232,120 +216,16 @@ def get_file_properties(self, **kwargs): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] + .. literalinclude:: ../tests/test_blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] :language: python - :dedent: 4 - :caption: Getting the properties for a file. + :dedent: 8 + :caption: Getting the properties for a file/directory. 
""" blob_properties = self._get_path_properties(**kwargs) return FileProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access - def _upload_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - - kwargs['properties'] = add_metadata_headers(metadata) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_mod_conditions(kwargs) - - if content_settings: - kwargs['path_http_headers'] = get_path_http_headers(content_settings) - - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['validate_content'] = validate_content - kwargs['max_concurrency'] = max_concurrency - kwargs['client'] = self._client.path - - return kwargs - - def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return upload_datalake_file(**options) - @staticmethod def _append_data_options(data, offset, length=None, **kwargs): # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] @@ -388,21 +268,12 @@ def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that this MD5 hash is not stored with the - file. + blob. :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. """ options = self._append_data_options( data, @@ -453,8 +324,6 @@ def flush_data(self, offset, # type: int specified position are written to the file when flush succeeds, but this optional parameter allows data after the flush position to be retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. :keyword bool close: Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled, a file changed event is raised. This event has a property @@ -487,15 +356,6 @@ def flush_data(self, offset, # type: int :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 8 - :caption: Commit the previous appended data. 
""" options = self._flush_data_options( offset, @@ -505,11 +365,13 @@ def flush_data(self, offset, # type: int except StorageErrorException as error: process_storage_error(error) - def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. + def read_file(self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + stream=None, # type: Optional[IO] + **kwargs): + # type: (...) -> Union[int, byte] + """Download a file from the service. Return the downloaded data in bytes or + write the downloaded data into user provided stream and return the written size. :param int offset: Start of byte range to use for downloading a section of the file. @@ -517,10 +379,12 @@ def download_file(self, offset=None, length=None, **kwargs): :param int length: Number of bytes to read from the stream. This is optional, but should be supplied for optimal performance. + :param int stream: + User provided stream to write the downloaded data into. :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + If specified, download_blob only succeeds if the blob's lease is active + and matches this ID. Required if the blob has an active lease. + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -544,39 +408,54 @@ def download_file(self, offset=None, length=None, **kwargs): The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.StorageStreamDownloader + :returns: downloaded data or the size of data written into the provided stream + :rtype: bytes or int .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START read_file] - :end-before: [END read_file] + .. literalinclude:: ../tests/test_blob_samples_hello_world.py + :start-after: [START download_a_blob] + :end-before: [END download_a_blob] :language: python - :dedent: 4 - :caption: Return the downloaded data. + :dedent: 12 + :caption: Download a blob. """ downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) + if stream: + return downloader.readinto(stream) + return downloader.readall() - def rename_file(self, new_name, # type: str - **kwargs): + def rename_file(self, rename_destination, **kwargs): # type: (**Any) -> DataLakeFileClient """ Rename the source file. - :param str new_name: the new file name the user want to rename to. + :param str rename_destination: the new file name the user want to rename to. The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. :keyword source_lease: A lease ID for the source path. 
If specified, the source path must have an active lease and the lease ID must match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -613,21 +492,11 @@ def rename_file(self, new_name, # type: str The source match condition to use upon the etag. :keyword int timeout: The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. + :return: """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - path = new_name[len(new_file_system):] + rename_destination = rename_destination.strip('/') + new_file_system = rename_destination.split('/')[0] + path = rename_destination[len(new_file_system):] new_directory_client = DataLakeFileClient( self.url, new_file_system, file_path=path, credential=self._raw_credential, diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_lease.py similarity index 94% rename from azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_lease.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_lease.py index ccdf525..0450516 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_lease.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_lease.py @@ -10,7 +10,7 @@ Union, Optional, Any, TypeVar, TYPE_CHECKING ) -from azure.multiapi.storagev2.blob.v2020_06_12 import BlobLeaseClient +from azure.storage.blob import BlobLeaseClient if TYPE_CHECKING: @@ -70,8 +70,8 @@ def acquire(self, lease_duration=-1, **kwargs): # type: (int, Optional[int], **Any) -> None """Requests a new lease.
- If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. :param int lease_duration: Specifies the duration of the lease, in seconds, or negative one @@ -107,9 +107,9 @@ def renew(self, **kwargs): """Renews the lease. The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. When you renew a lease, the lease duration clock resets. :keyword ~datetime.datetime if_modified_since: @@ -141,8 +141,8 @@ def release(self, **kwargs): """Release the lease. The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. + that associated with the container or blob. Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. @@ -173,7 +173,7 @@ def change(self, proposed_lease_id, **kwargs): """Change the lease ID of an active lease. :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 + Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. @@ -201,12 +201,12 @@ def change(self, proposed_lease_id, **kwargs): def break_lease(self, lease_break_period=None, **kwargs): # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. + """Break the lease, if the container or blob has an active lease. Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; the request is not required to specify a matching lease ID. When a lease is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. + no lease operation except break and release can be performed on the container or blob. When a lease is successfully broken, the response indicates the interval in seconds until a new lease can be acquired. 
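The lease docstrings above describe the acquire/renew/release/change/break cycle exposed through DataLakeLeaseClient. As a minimal usage sketch against this vendored package: the import path, account URL, credential, and file system name below are illustrative assumptions, not values taken from the diff.

.. code-block:: python

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import DataLakeServiceClient

    # Placeholder endpoint and credential; substitute real values.
    service = DataLakeServiceClient(
        "https://myaccount.dfs.core.windows.net", credential="<account-key>")
    file_system = service.get_file_system_client("my-file-system")

    # Acquire an infinite lease (-1), renew it, and release it so other
    # clients can lease the file system again.
    lease = file_system.acquire_lease(lease_duration=-1)
    try:
        lease.renew()
    finally:
        lease.release()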
diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_service_client.py similarity index 77% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_service_client.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_service_client.py index d2bccd6..a0675ae 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_data_lake_service_client.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_data_lake_service_client.py @@ -11,12 +11,12 @@ from azure.core.paging import ItemPaged -from azure.multiapi.storagev2.blob.v2019_07_07 import BlobServiceClient +from azure.storage.blob import BlobServiceClient from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str from ._file_system_client import FileSystemClient from ._data_lake_directory_client import DataLakeDirectoryClient from ._data_lake_file_client import DataLakeFileClient -from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode +from ._models import UserDelegationKey, FileSystemPropertiesPaged from ._serialize import convert_dfs_url_to_blob_url @@ -29,7 +29,8 @@ class DataLakeServiceClient(StorageAccountHostsMixin): can also be retrieved using the `get_client` functions. :ivar str url: - The full endpoint URL to the datalake service endpoint. + The full endpoint URL to the datalake service endpoint. This could be either the + primary endpoint, or the secondary endpoint depending on the current `location_mode`. :ivar str primary_endpoint: The full primary endpoint URL. :ivar str primary_hostname: @@ -46,14 +47,14 @@ class DataLakeServiceClient(StorageAccountHostsMixin): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service.py + .. literalinclude:: ../samples/test_datalake_authentication_samples.py :start-after: [START create_datalake_service_client] :end-before: [END create_datalake_service_client] :language: python :dedent: 8 - :caption: Creating the DataLakeServiceClient from connection string. + :caption: Creating the DataLakeServiceClient with account url and credential. - .. literalinclude:: ../samples/datalake_samples_service.py + .. literalinclude:: ../samples/test_datalake_authentication_samples.py :start-after: [START create_datalake_service_client_oauth] :end-before: [END create_datalake_service_client_oauth] :language: python @@ -79,30 +80,16 @@ def __init__( blob_account_url = convert_dfs_url_to_blob_url(account_url) self._blob_account_url = blob_account_url self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access _, sas_token = parse_query(parsed_url.query) self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - - def __exit__(self, *args): - self._blob_service_client.close() - super(DataLakeServiceClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. 
- """ - self._blob_service_client.close() - self.__exit__() def _format_url(self, hostname): - """Format the endpoint URL according to hostname + """Format the endpoint URL according to the current location + mode hostname. """ formated_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str) return formated_url @@ -126,17 +113,10 @@ def from_connection_string( Credentials provided here will take precedence over those in the connection string. :return a DataLakeServiceClient :rtype ~azure.storage.filedatalake.DataLakeServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_data_lake_service_client_from_conn_str] - :end-before: [END create_data_lake_service_client_from_conn_str] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from a connection string. """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'dfs') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary return cls(account_url, credential=credential, **kwargs) def get_user_delegation_key(self, key_start_time, # type: datetime @@ -156,20 +136,12 @@ def get_user_delegation_key(self, key_start_time, # type: datetime The timeout parameter is expressed in seconds. :return: The user delegation key. :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. """ delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time, key_expiry_time=key_expiry_time, **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access + delegation_key._class_ = UserDelegationKey # pylint: disable=protected-access + return delegation_key def list_file_systems(self, name_starts_with=None, # type: Optional[str] include_metadata=None, # type: Optional[bool] @@ -196,11 +168,11 @@ def list_file_systems(self, name_starts_with=None, # type: Optional[str] .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START dsc_list_file_systems] + :end-before: [END dsc_list_file_systems] :language: python - :dedent: 8 + :dedent: 12 :caption: Listing the file systems in the datalake service. """ item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, @@ -220,8 +192,7 @@ def create_file_system(self, file_system, # type: Union[FileSystemProperties, s be raised. This method returns a client with which to interact with the newly created file system. - :param str file_system: - The name of the file system to create. + :param str file_system: The name of the file system to create. :param metadata: A dict with name-value pairs to associate with the file system as metadata. Example: `{'Category':'test'}` @@ -235,11 +206,11 @@ def create_file_system(self, file_system, # type: Union[FileSystemProperties, s .. admonition:: Example: - .. 
literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START dsc_create_file_system] + :end-before: [END dsc_create_file_system] :language: python - :dedent: 8 + :dedent: 12 :caption: Creating a file system in the datalake service. """ file_system_client = self.get_file_system_client(file_system) @@ -258,11 +229,10 @@ def delete_file_system(self, file_system, # type: Union[FileSystemProperties, s The file system to delete. This can either be the name of the file system, or an instance of FileSystemProperties. :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, delete_file_system only succeeds if the file system's lease is active and matches this ID. Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -286,11 +256,11 @@ def delete_file_system(self, file_system, # type: Union[FileSystemProperties, s .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_delete_file_system] + :end-before: [END bsc_delete_file_system] :language: python - :dedent: 8 + :dedent: 12 :caption: Deleting a file system in the datalake service. """ file_system_client = self.get_file_system_client(file_system) @@ -313,21 +283,15 @@ def get_file_system_client(self, file_system # type: Union[FileSystemProperties .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_file_system_client] + :end-before: [END bsc_get_file_system_client] :language: python :dedent: 8 :caption: Getting the file system client to interact with a specific file system. """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - _configuration=self._config, - _pipeline=self._pipeline, _hosts=self._hosts, + return FileSystemClient(self.url, file_system, credential=self._raw_credential, _configuration=self._config, + _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) @@ -352,25 +316,17 @@ def get_directory_client(self, file_system, # type: Union[FileSystemProperties, .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] + .. 
literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_directory_client] + :end-before: [END bsc_get_directory_client] :language: python - :dedent: 8 + :dedent: 12 :caption: Getting the directory client to interact with a specific directory. """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, + return DataLakeDirectoryClient(self.url, file_system, directory_name=directory, credential=self._raw_credential, _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function @@ -397,25 +353,21 @@ def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_file_client] + :end-before: [END bsc_get_file_client] :language: python - :dedent: 8 + :dedent: 12 :caption: Getting the file client to interact with a specific file. """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system try: file_path = file_path.name except AttributeError: pass return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, + self.url, file_system, file_path=file_path, credential=self._raw_credential, _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, + _location_mode=self._location_mode, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_deserialize.py similarity index 96% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_deserialize.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_deserialize.py index 9d0881a..cdffe88 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_deserialize.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_deserialize.py @@ -34,6 +34,10 @@ def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argu return {k[10:]: v for k, v in raw_metadata.items()} +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + def return_headers_and_deserialized_path_list(response, deserialized, response_headers): # pylint: disable=unused-argument return deserialized.paths if deserialized.paths else {}, normalize_headers(response_headers) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_file_system_client.py similarity index 73% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_file_system_client.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_file_system_client.py 
index b719df7..fd27d08 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_file_system_client.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_file_system_client.py @@ -13,10 +13,10 @@ import six from azure.core.paging import ItemPaged -from azure.multiapi.storagev2.blob.v2019_07_07 import ContainerClient +from azure.storage.blob import ContainerClient from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str from ._serialize import convert_dfs_url_to_blob_url -from ._models import LocationMode, FileSystemProperties, PathPropertiesPaged, PublicAccess +from ._models import LocationMode, FileSystemProperties, PathPropertiesPaged from ._data_lake_file_client import DataLakeFileClient from ._data_lake_directory_client import DataLakeDirectoryClient from ._data_lake_lease import DataLakeLeaseClient @@ -49,12 +49,19 @@ class FileSystemClient(StorageAccountHostsMixin): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START create_file_system_client_from_service] :end-before: [END create_file_system_client_from_service] :language: python :dedent: 8 :caption: Get a FileSystemClient from an existing DataLakeServiceClient. + + .. literalinclude:: ../samples/test_file_system_samples.py + :start-after: [START create_file_system_client_sasurl] + :end-before: [END create_file_system_client_sasurl] + :language: python + :dedent: 8 + :caption: Creating the FileSystemClient client directly. """ def __init__( self, account_url, # type: str @@ -82,7 +89,9 @@ def __init__( blob_hosts = None if datalake_hosts: blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} + blob_secondary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.SECONDARY]) + blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, + LocationMode.SECONDARY: blob_secondary_account_url} self._container_client = ContainerClient(blob_account_url, file_system_name, credential=credential, _hosts=blob_hosts, **kwargs) @@ -92,8 +101,6 @@ def __init__( super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline) def _format_url(self, hostname): @@ -106,18 +113,6 @@ def _format_url(self, hostname): quote(file_system_name), self._query_str) - def __exit__(self, *args): - self._container_client.close() - super(FileSystemClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._container_client.close() - self.__exit__() - @classmethod def from_connection_string( cls, conn_str, # type: str @@ -140,17 +135,10 @@ def from_connection_string( Credentials provided here will take precedence over those in the connection string. :return a FileSystemClient :rtype ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_connection_string] - :end-before: [END create_file_system_client_from_connection_string] - :language: python - :dedent: 8 - :caption: Create FileSystemClient from connection string """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'dfs') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary return cls( account_url, file_system_name=file_system_name, credential=credential, **kwargs) @@ -197,12 +185,12 @@ def acquire_lease( .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START acquire_lease_on_file_system] :end-before: [END acquire_lease_on_file_system] :language: python :dedent: 8 - :caption: Acquiring a lease on the file system. + :caption: Acquiring a lease on the file_system. """ lease = DataLakeLeaseClient(self, lease_id=lease_id) lease.acquire(lease_duration=lease_duration, **kwargs) @@ -223,7 +211,7 @@ def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] file system as metadata. Example: `{'Category':'test'}` :type metadata: dict(str, str) :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. + Possible values include: file system, file. :type public_access: ~azure.storage.filedatalake.PublicAccess :keyword int timeout: The timeout parameter is expressed in seconds. @@ -231,7 +219,7 @@ def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START create_file_system] :end-before: [END create_file_system] :language: python @@ -276,7 +264,7 @@ def delete_file_system(self, **kwargs): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START delete_file_system] :end-before: [END delete_file_system] :language: python @@ -300,7 +288,7 @@ def get_file_system_properties(self, **kwargs): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START get_file_system_properties] :end-before: [END get_file_system_properties] :language: python @@ -311,7 +299,7 @@ def get_file_system_properties(self, **kwargs): return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] + self, metadata=None, # type: Optional[Dict[str, str]] **kwargs ): # type: (...) -> Dict[str, Union[str, datetime]] @@ -346,79 +334,19 @@ def set_file_system_metadata( # type: ignore The match condition to use upon the etag. :keyword int timeout: The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). + :returns: file system-updated property dict (Etag and last modified). .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py + .. 
literalinclude:: ../samples/test_file_system_samples.py :start-after: [START set_file_system_metadata] :end-before: [END set_file_system_metadata] :language: python :dedent: 12 - :caption: Setting metadata on the file system. + :caption: Setting metadata on the container. """ return self._container_client.set_container_metadata(metadata=metadata, **kwargs) - def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File System-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, the operation only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. 
- :rtype: dict[str, Any] - """ - access_policy = self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - def get_paths(self, path=None, # type: Optional[str] recursive=True, # type: Optional[bool] max_results=None, # type: Optional[int] @@ -433,15 +361,14 @@ def get_paths(self, path=None, # type: Optional[str] :param int max_results: An optional value that specifies the maximum number of items to return per page. If omitted or greater than 5,000, the response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. + :keyword upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. :type upn: bool :keyword int timeout: The timeout parameter is expressed in seconds. @@ -450,12 +377,12 @@ def get_paths(self, path=None, # type: Optional[str] .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] + .. literalinclude:: ../tests/test_blob_samples_containers.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] :language: python :dedent: 8 - :caption: List the paths in the file system. + :caption: List the blobs in the container. """ timeout = kwargs.pop('timeout', None) command = functools.partial( @@ -468,6 +395,7 @@ def get_paths(self, path=None, # type: Optional[str] page_iterator_class=PathPropertiesPaged, **kwargs) def create_directory(self, directory, # type: Union[DirectoryProperties, str] + content_settings=None, # type: Optional[ContentSettings] metadata=None, # type: Optional[Dict[str, str]] **kwargs): # type: (...) -> DataLakeDirectoryClient @@ -478,30 +406,27 @@ def create_directory(self, directory, # type: Union[DirectoryProperties, str] The directory with which to interact. This can either be the name of the directory, or an instance of DirectoryProperties. :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. 
- :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -522,18 +447,9 @@ def create_directory(self, directory, # type: Union[DirectoryProperties, str] :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Create directory in the file system. """ directory_client = self.get_directory_client(directory) - directory_client.create_directory(metadata=metadata, **kwargs) + directory_client.create_directory(content_settings=content_settings, metadata=metadata, **kwargs) return directory_client def delete_directory(self, directory, # type: Union[DirectoryProperties, str] @@ -547,9 +463,9 @@ def delete_directory(self, directory, # type: Union[DirectoryProperties, str] or an instance of DirectoryProperties. :type directory: str or ~azure.storage.filedatalake.DirectoryProperties :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. 
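The create_directory docstring above lists the metadata, umask, and permissions options that FileSystemClient forwards to the service. A rough sketch of that call follows; all names and option values are chosen purely for illustration and are not taken from the diff.

.. code-block:: python

    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import FileSystemClient

    # Placeholder account URL, file system name, and credential.
    file_system = FileSystemClient(
        "https://myaccount.dfs.core.windows.net", "my-file-system",
        credential="<account-key>")

    # metadata is stored with the path; umask/permissions are only honoured
    # when Hierarchical Namespace is enabled for the account.
    directory_client = file_system.create_directory(
        "my-directory",
        metadata={"Category": "test"},
        umask="0027",
        permissions="rwxr-x---")

    # The same client can later remove the directory again.
    file_system.delete_directory("my-directory")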
@@ -570,15 +486,6 @@ def delete_directory(self, directory, # type: Union[DirectoryProperties, str] :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Delete directory in the file system. """ directory_client = self.get_directory_client(directory) directory_client.delete_directory(**kwargs) @@ -597,27 +504,24 @@ def create_file(self, file, # type: Union[FileProperties, str] :param ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -638,21 +542,13 @@ def create_file(self, file, # type: Union[FileProperties, str] :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 8 - :caption: Create file in the file system. 
""" file_client = self.get_file_client(file) file_client.create_file(**kwargs) return file_client def delete_file(self, file, # type: Union[FileProperties, str] + lease=None, # type: Optional[Union[DataLakeLeaseClient, str]] **kwargs): # type: (...) -> DataLakeFileClient """ @@ -663,9 +559,9 @@ def delete_file(self, file, # type: Union[FileProperties, str] or an instance of FileProperties. :type file: str or ~azure.storage.filedatalake.FileProperties :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -686,29 +582,11 @@ def delete_file(self, file, # type: Union[FileProperties, str] :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 8 - :caption: Delete file in the file system. """ file_client = self.get_file_client(file) - file_client.delete_file(**kwargs) + file_client.delete_file(lease=lease, **kwargs) return file_client - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] ): # type: (...) -> DataLakeDirectoryClient @@ -725,22 +603,17 @@ def get_directory_client(self, directory # type: Union[DirectoryProperties, str .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START get_directory_client_from_file_system] :end-before: [END get_directory_client_from_file_system] :language: python - :dedent: 8 + :dedent: 12 :caption: Getting the directory client to interact with a specific directory. """ - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, + return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory, credential=self._raw_credential, _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function @@ -762,11 +635,11 @@ def get_file_client(self, file_path # type: Union[FileProperties, str] .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START get_file_client_from_file_system] :end-before: [END get_file_client_from_file_system] :language: python - :dedent: 8 + :dedent: 12 :caption: Getting the file client to interact with a specific file. 
""" try: @@ -777,6 +650,6 @@ def get_file_client(self, file_path # type: Union[FileProperties, str] return DataLakeFileClient( self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, + _location_mode=self._location_mode, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/__init__.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/__init__.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/__init__.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_configuration.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/_configuration.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_configuration.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/_data_lake_storage_client.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_data_lake_storage_client.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/_data_lake_storage_client.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/_data_lake_storage_client.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/__init__.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/__init__.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/__init__.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_configuration_async.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/_configuration_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_configuration_async.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/_data_lake_storage_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_data_lake_storage_client_async.py similarity index 92% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/_data_lake_storage_client_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_data_lake_storage_client_async.py index 3f41f1b..db3e60e 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/_data_lake_storage_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/_data_lake_storage_client_async.py @@ -64,9 +64,3 @@ async def __aenter__(self): return self async def __aexit__(self, *exc_details): await self._client.__aexit__(*exc_details) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. 
- """ - await self._client.close() diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/__init__.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/operations_async/__init__.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/__init__.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/operations_async/_file_system_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_file_system_operations_async.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/operations_async/_file_system_operations_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_file_system_operations_async.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/operations_async/_path_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_path_operations_async.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/operations_async/_path_operations_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_path_operations_async.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/aio/operations_async/_service_operations_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/__init__.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/models/__init__.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/__init__.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/models/_data_lake_storage_client_enums.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_data_lake_storage_client_enums.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/models/_data_lake_storage_client_enums.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_data_lake_storage_client_enums.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/models/_models.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models_py3.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/models/_models_py3.py rename to 
azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/models/_models_py3.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/__init__.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/operations/__init__.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/__init__.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_file_system_operations.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/operations/_file_system_operations.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_file_system_operations.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_path_operations.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/operations/_path_operations.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_path_operations.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_service_operations.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/operations/_service_operations.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/operations/_service_operations.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/version.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/version.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_generated/version.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_generated/version.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_models.py similarity index 78% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_models.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_models.py index d5e3a00..644db50 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_models.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_models.py @@ -8,15 +8,14 @@ from enum import Enum from azure.core.paging import PageIterator -from azure.multiapi.storagev2.blob.v2019_07_07 import LeaseProperties as BlobLeaseProperties -from azure.multiapi.storagev2.blob.v2019_07_07 import AccountSasPermissions as BlobAccountSasPermissions -from azure.multiapi.storagev2.blob.v2019_07_07 import ResourceTypes as BlobResourceTypes -from azure.multiapi.storagev2.blob.v2019_07_07 import UserDelegationKey as BlobUserDelegationKey -from azure.multiapi.storagev2.blob.v2019_07_07 import ContentSettings as BlobContentSettings -from azure.multiapi.storagev2.blob.v2019_07_07 import ContainerSasPermissions, BlobSasPermissions -from azure.multiapi.storagev2.blob.v2019_07_07 import AccessPolicy as BlobAccessPolicy -from azure.multiapi.storagev2.blob.v2019_07_07._generated.models import StorageErrorException -from azure.multiapi.storagev2.blob.v2019_07_07._models import ContainerPropertiesPaged +from 
azure.storage.blob import LeaseProperties as BlobLeaseProperties +from azure.storage.blob import AccountSasPermissions as BlobAccountSasPermissions +from azure.storage.blob import ResourceTypes as BlobResourceTypes +from azure.storage.blob import UserDelegationKey as BlobUserDelegationKey +from azure.storage.blob import ContentSettings as BlobContentSettings +from azure.storage.blob import ContainerSasPermissions, BlobSasPermissions +from azure.storage.blob._generated.models import StorageErrorException +from azure.storage.blob._models import ContainerPropertiesPaged from ._deserialize import return_headers_and_deserialized_path_list from ._generated.models import Path from ._shared.models import DictMixin @@ -117,7 +116,7 @@ class DirectoryProperties(DictMixin): :ivar str etag: The ETag contains a value that you can use to perform operations conditionally. :ivar bool deleted: if the current directory marked as deleted - :ivar dict metadata: Name-value pairs associated with the directory as metadata. + :ivar dict metadata: Name-value pairs associated with the blob as metadata. :ivar ~azure.storage.filedatalake.LeaseProperties lease: Stores all the lease information for the directory. :ivar ~datetime.datetime last_modified: @@ -164,7 +163,7 @@ class FileProperties(DictMixin): :ivar str etag: The ETag contains a value that you can use to perform operations conditionally. :ivar bool deleted: if the current file marked as deleted - :ivar dict metadata: Name-value pairs associated with the file as metadata. + :ivar dict metadata: Name-value pairs associated with the blob as metadata. :ivar ~azure.storage.filedatalake.LeaseProperties lease: Stores all the lease information for the file. :ivar ~datetime.datetime last_modified: @@ -221,7 +220,7 @@ class PathProperties(object): read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified. + :ivar datetime last_modified: A datetime object representing the last time the blob was modified. :ivar bool is_directory: is the path a directory or not. :ivar str etag: The ETag contains a value that you can use to perform operations conditionally. @@ -356,31 +355,18 @@ class ContentSettings(BlobContentSettings): If the content_md5 has been set for the file, this response header is stored so that the client can check for message content integrity. - :keyword str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :keyword str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :keyword str content_language: - If the content_language has previously been set - for the file, that value is stored. - :keyword str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :keyword str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :keyword str content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. 
""" def __init__( - self, **kwargs): + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None, **kwargs): super(ContentSettings, self).__init__( + content_type=content_type, + content_encoding=content_encoding, + content_language=content_language, + content_disposition=content_disposition, + cache_control=cache_control, + content_md5=content_md5, **kwargs ) @@ -457,55 +443,6 @@ def __init__(self, read=False, create=False, write=False, ) -class AccessPolicy(BlobAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.datalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, **kwargs): - super(AccessPolicy, self).__init__( - permission=permission, expiry=expiry, start=kwargs.pop('start', None) - ) - - class ResourceTypes(BlobResourceTypes): """ Specifies the resource types that are accessible with the account SAS. @@ -547,17 +484,8 @@ class UserDelegationKey(BlobUserDelegationKey): :ivar str value: The user delegation key. 
""" - @classmethod - def _from_generated(cls, generated): - delegation_key = cls() - delegation_key.signed_oid = generated.signed_oid - delegation_key.signed_tid = generated.signed_tid - delegation_key.signed_start = generated.signed_start - delegation_key.signed_expiry = generated.signed_expiry - delegation_key.signed_service = generated.signed_service - delegation_key.signed_version = generated.signed_version - delegation_key.value = generated.value - return delegation_key + def _init(self): + super(UserDelegationKey, self).__init__() class PublicAccess(str, Enum): @@ -565,6 +493,12 @@ class PublicAccess(str, Enum): Specifies whether data in the file system may be accessed publicly and the level of access. """ + OFF = 'off' + """ + Specifies that there is no public read access for both the file systems and files within the file system. + Clients cannot enumerate the file systems within the storage account as well as the files within the file system. + """ + File = 'blob' """ Specifies public read access for files. file data within this file system can be read @@ -585,7 +519,8 @@ def _from_generated(cls, public_access): return cls.File elif public_access == "container": return cls.FileSystem - + elif public_access == "off": + return cls.OFF return None diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_path_client.py similarity index 83% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_path_client.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_path_client.py index f2f123f..a823d50 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_path_client.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_path_client.py @@ -12,7 +12,7 @@ import six -from azure.multiapi.storagev2.blob.v2019_07_07 import BlobClient +from azure.storage.blob import BlobClient from ._shared.base_client import StorageAccountHostsMixin, parse_query from ._shared.response_handlers import return_response_headers from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \ @@ -47,13 +47,9 @@ def __init__( # remove the preceding/trailing delimiter from the path components file_system_name = file_system_name.strip('/') - - # the name of root directory is / - if path_name != '/': - path_name = path_name.strip('/') - + path_name = path_name.strip('/') if not (file_system_name and path_name): - raise ValueError("Please specify a file system name and file path.") + raise ValueError("Please specify a container name and blob name.") if not parsed_url.netloc: raise ValueError("Invalid URL: {}".format(account_url)) @@ -64,7 +60,9 @@ def __init__( blob_hosts = None if datalake_hosts: blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} + blob_secondary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.SECONDARY]) + blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, + LocationMode.SECONDARY: blob_secondary_account_url} self._blob_client = BlobClient(blob_account_url, file_system_name, path_name, credential=credential, _hosts=blob_hosts, **kwargs) @@ -76,22 +74,8 @@ def __init__( super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" self._client = 
DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline) - def __exit__(self, *args): - self._blob_client.close() - super(PathClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._blob_client.close() - self.__exit__() - def _format_url(self, hostname): file_system_name = self.file_system_name if isinstance(file_system_name, six.text_type): @@ -133,10 +117,9 @@ def _create(self, resource_type, content_settings=None, metadata=None, **kwargs) """ Create directory or file - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' + :param resource_type: Required for Create File and Create Directory. + The value must be "file" or "directory". Possible values include: + 'directory', 'file' :type resource_type: str :param ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set path properties. @@ -146,22 +129,20 @@ def _create(self, resource_type, content_settings=None, metadata=None, **kwargs) :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. + :keyword permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :type permissions: str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. @@ -273,33 +254,29 @@ def set_access_control(self, owner=None, # type: Optional[str] """ Set the owner, group, permissions, or access control list for a path. - :param owner: - Optional. The owner of the file or directory. + :param owner: Optional. The owner of the file or directory. :type owner: str - :param group: - Optional. The owning group of the file or directory. + :param group: Optional. The owning group of the file or directory. 
:type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + permissions and acl are mutually exclusive. :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + permissions and acl are mutually exclusive. :type acl: str :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -321,8 +298,6 @@ def set_access_control(self, owner=None, # type: Optional[str] The timeout parameter is expressed in seconds. :keyword: response dict (Etag and last modified). """ - if not any([owner, group, permissions, acl]): - raise ValueError("At least one parameter should be set for set_access_control API") options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) try: return self._client.path.set_access_control(**options) @@ -351,20 +326,19 @@ def get_access_control(self, upn=None, # type: Optional[bool] **kwargs): # type: (...) -> Dict[str, Any] """ - :param upn: Optional. - Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. + :param upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. 
The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. :type upn: bool :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -396,8 +370,6 @@ def _rename_path_options(self, rename_source, content_settings=None, metadata=No # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] if self.require_encryption or (self.key_encryption_key is not None): raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None): - raise ValueError("metadata, permissions, umask is not supported for this operation") access_conditions = get_access_conditions(kwargs.pop('lease', None)) source_lease_id = get_lease_id(kwargs.pop('source_lease', None)) @@ -410,9 +382,9 @@ def _rename_path_options(self, rename_source, content_settings=None, metadata=No options = { 'rename_source': rename_source, - 'properties': None, - 'permissions': None, - 'umask': None, + 'properties': add_metadata_headers(metadata), + 'permissions': kwargs.pop('permissions', None), + 'umask': kwargs.pop('umask', None), 'path_http_headers': path_http_headers, 'lease_access_conditions': access_conditions, 'source_lease_id': source_lease_id, @@ -430,27 +402,39 @@ def _rename_path(self, rename_source, """ Rename directory or file - :param rename_source: - The value must have the following format: "/{filesystem}/{path}". + :param rename_source: The value must have the following format: "/{filesystem}/{path}". :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + :param source_lease: A lease ID for the source path. If specified, + the source path must have an active lease and the leaase ID must + match. + :type source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set path properties. - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: + :param lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. 
+ The umask must be specified in 4-digit octal notation (e.g. 0766). + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: + :param ~datetime.datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. @@ -461,13 +445,13 @@ def _rename_path(self, rename_source, and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: + :param ~datetime.datetime source_if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: + :param ~datetime.datetime source_if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. @@ -478,8 +462,9 @@ def _rename_path(self, rename_source, and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions source_match_condition: The source match condition to use upon the etag. - :keyword int timeout: + :param int timeout: The timeout parameter is expressed in seconds. + :return: """ options = self._rename_path_options( rename_source, @@ -497,7 +482,7 @@ def _get_path_properties(self, **kwargs): :keyword lease: Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -532,7 +517,7 @@ def _get_path_properties(self, **kwargs): path_properties.__class__ = DirectoryProperties return path_properties - def set_metadata(self, metadata, # type: Dict[str, str] + def set_metadata(self, metadata=None, # type: Optional[Dict[str, str]] **kwargs): # type: (...) 
-> Dict[str, Union[str, datetime]] """Sets one or more user-defined name-value pairs for the specified @@ -544,10 +529,9 @@ def set_metadata(self, metadata, # type: Dict[str, str] A dict containing name-value pairs to associate with the file system as metadata. Example: {'category':'test'} :type metadata: dict[str, str] - :keyword lease: + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, set_file_system_metadata only succeeds if the file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -568,6 +552,15 @@ def set_metadata(self, metadata, # type: Dict[str, str] :keyword int timeout: The timeout parameter is expressed in seconds. :returns: file system-updated property dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_file_system_samples.py + :start-after: [START set_file_system_metadata] + :end-before: [END set_file_system_metadata] + :language: python + :dedent: 12 + :caption: Setting metadata on the container. """ return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) @@ -580,10 +573,9 @@ def set_http_headers(self, content_settings=None, # type: Optional[ContentSetti :param ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set file/directory properties. - :keyword lease: + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, set_file_system_metadata only succeeds if the file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -646,6 +638,15 @@ def acquire_lease(self, lease_duration=-1, # type: Optional[int] The timeout parameter is expressed in seconds. :returns: A DataLakeLeaseClient object, that can be run in a context manager. :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_file_system_samples.py + :start-after: [START acquire_lease_on_file_system] + :end-before: [END acquire_lease_on_file_system] + :language: python + :dedent: 8 + :caption: Acquiring a lease on the file_system. """ lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore lease.acquire(lease_duration=lease_duration, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_serialize.py similarity index 94% rename from azure/multiapi/storagev2/filedatalake/v2019_12_12/_serialize.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_serialize.py index 0de3e85..36f80e6 100644 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_serialize.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_serialize.py @@ -3,14 +3,14 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- -from azure.multiapi.storagev2.blob.v2019_12_12._serialize import _get_match_headers # pylint: disable=protected-access +from azure.storage.blob._serialize import _get_match_headers # pylint: disable=protected-access from ._shared import encode_base64 from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \ SourceModifiedAccessConditions, LeaseAccessConditions def convert_dfs_url_to_blob_url(dfs_account_url): - return dfs_account_url.replace('.dfs.', '.blob.', 1) + return dfs_account_url.replace('dfs.core.windows.net', 'blob.core.windows.net', 1) def add_metadata_headers(metadata=None): diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/__init__.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/__init__.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/__init__.py diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/authentication.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/authentication.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/authentication.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client.py similarity index 98% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/base_client.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client.py index d5aa27f..30a89cf 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/base_client.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client.py @@ -36,7 +36,6 @@ ProxyPolicy, DistributedTracingPolicy, HttpLoggingPolicy, - UserAgentPolicy ) from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT @@ -45,6 +44,7 @@ from .shared_access_signature import QueryStringConstants from .policies import ( StorageHeadersPolicy, + StorageUserAgentPolicy, StorageContentValidation, StorageRequestHook, StorageResponseHook, @@ -53,7 +53,6 @@ QueueMessagePolicy, ExponentialRetry, ) -from .._version import VERSION from .._generated.models import StorageErrorException from .response_handlers import process_storage_error, PartialBatchErrorException @@ -63,7 +62,7 @@ "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, + "dfs": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, } @@ -268,8 +267,7 @@ def _batch_send( policies=[ StorageHeadersPolicy(), self._credential_policy - ], - enforce_https=False + ] ) pipeline_response = self._pipeline.run( @@ -380,8 +378,7 @@ def create_configuration(**kwargs): # type: (**Any) -> Configuration config = Configuration(**kwargs) config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.user_agent_policy = StorageUserAgentPolicy(**kwargs) config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) 
config.logging_policy = StorageLoggingPolicy(**kwargs) config.proxy_policy = ProxyPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client_async.py similarity index 99% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/base_client_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client_async.py index 1772251..3c806d7 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/base_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/base_client_async.py @@ -124,8 +124,7 @@ async def _batch_send( policies=[ StorageHeadersPolicy(), self._credential_policy - ], - enforce_https=False + ] ) pipeline_response = await self._pipeline.run( diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/constants.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/constants.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/constants.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/constants.py diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/encryption.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/encryption.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/encryption.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/models.py similarity index 99% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/models.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/models.py index c72fc6d..2a30570 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/models.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/models.py @@ -24,7 +24,6 @@ class StorageErrorCode(str, Enum): account_is_disabled = "AccountIsDisabled" authentication_failed = "AuthenticationFailed" authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" condition_headers_not_supported = "ConditionHeadersNotSupported" condition_not_met = "ConditionNotMet" empty_metadata_key = "EmptyMetadataKey" diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/parser.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/parser.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/parser.py diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/policies.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies.py diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies_async.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/policies_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/policies_async.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/request_handlers.py 
b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/request_handlers.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/request_handlers.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/request_handlers.py diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/response_handlers.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/response_handlers.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/response_handlers.py diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/shared_access_signature.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/shared_access_signature.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/shared_access_signature.py diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads.py similarity index 99% rename from azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/uploads.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads.py index 9aa0b2a..13b814e 100644 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/uploads.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads.py @@ -280,10 +280,7 @@ class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method def _is_chunk_empty(self, chunk_data): # read until non-zero byte is encountered # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b"\x00"]: - return False - return True + return not any(bytearray(chunk_data)) def _upload_chunk(self, chunk_offset, chunk_data): # avoid uploading the empty pages diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads_async.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/uploads_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared/uploads_async.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared_access_signature.py similarity index 73% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared_access_signature.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared_access_signature.py index 487d2c6..4d3d9ad 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared_access_signature.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_shared_access_signature.py @@ -3,14 +3,9 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -from azure.multiapi.storagev2.blob.v2019_07_07 import generate_account_sas as generate_blob_account_sas -from azure.multiapi.storagev2.blob.v2019_07_07 import generate_container_sas, generate_blob_sas -if TYPE_CHECKING: - import datetime - from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \ - UserDelegationKey +from azure.storage.blob import generate_account_sas as generate_blob_account_sas +from azure.storage.blob import generate_container_sas, generate_blob_sas def generate_account_sas( @@ -19,12 +14,14 @@ def generate_account_sas( resource_types, # type: Union[ResourceTypes, str] permission, # type: Union[AccountSasPermissions, str] expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> str - """Generates a shared access signature for the DataLake service. + """Generates a shared access signature for the blob service. - Use the returned signature as the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. :param str account_name: The storage account name used to generate the shared access signature. @@ -32,14 +29,14 @@ def generate_account_sas( The access key to generate the shared access signature. :param resource_types: Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes + :type resource_types: str or ~azure.storage.blob.ResourceTypes :param permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions + :type permission: str or ~azure.storage.blob.AccountSasPermissions :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy @@ -48,14 +45,14 @@ def generate_account_sas( convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: ~datetime.datetime or str - :keyword start: + :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: + :type start: ~datetime.datetime or str + :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. @@ -65,6 +62,15 @@ def generate_account_sas( Specifies the protocol permitted for a request made. The default value is https. :return: A Shared Access Signature (sas) token. :rtype: str + + .. admonition:: Example: + + .. 
literalinclude:: ../tests/test_blob_samples_authentication.py + :start-after: [START create_sas_token] + :end-before: [END create_sas_token] + :language: python + :dedent: 8 + :caption: Generating a shared access signature. """ return generate_blob_account_sas( account_name=account_name, @@ -72,6 +78,8 @@ def generate_account_sas( resource_types=resource_types, permission=permission, expiry=expiry, + start=start, + ip=ip, **kwargs ) @@ -79,30 +87,32 @@ def generate_account_sas( def generate_file_system_sas( account_name, # type: str file_system_name, # type: str - credential, # type: Union[str, UserDelegationKey] + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] permission=None, # type: Optional[Union[FileSystemSasPermissions, str]] expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> str - """Generates a shared access signature for a file system. + """Generates a shared access signature for a container. - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. :param str account_name: The storage account name used to generate the shared access signature. :param str file_system_name: The name of the file system. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. + :param str account_key: + The access key to generate the shared access signature. Either `account_key` or + `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account key, the user could pass in a user delegation key. A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey :param permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. @@ -110,7 +120,7 @@ def generate_file_system_sas( Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions + :type permission: str or ~azure.storage.blob.ContainerSasPermissions :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy @@ -119,14 +129,14 @@ def generate_file_system_sas( convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: datetime or str - :keyword start: + :param start: The time at which the shared access signature becomes valid. 
If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. - :paramtype start: datetime or str - :keyword str ip: + :type start: datetime or str + :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. @@ -151,14 +161,25 @@ def generate_file_system_sas( using this shared access signature. :return: A Shared Access Signature (sas) token. :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../tests/test_blob_samples_containers.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 12 + :caption: Generating a sas token. """ return generate_container_sas( account_name=account_name, container_name=file_system_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, + account_key=account_key, + user_delegation_key=user_delegation_key, permission=permission, expiry=expiry, + start=start, + ip=ip, **kwargs) @@ -166,32 +187,36 @@ def generate_directory_sas( account_name, # type: str file_system_name, # type: str directory_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[BlobSasPermissions, str]] expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> str - """Generates a shared access signature for a directory. + """Generates a shared access signature for a blob. - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. :param str account_name: The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. + :param str container_name: + The name of the container. + :param str blob_name: + The name of the blob. + :param str snapshot: + An optional blob snapshot ID. + :param str account_key: + The access key to generate the shared access signature. Either `account_key` or + `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account key, the user could pass in a user delegation key. A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. 
+ this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey :param permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. @@ -199,7 +224,7 @@ def generate_directory_sas( Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions + :type permission: str or ~azure.storage.blob.BlobSasPermissions :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy @@ -208,14 +233,14 @@ def generate_directory_sas( convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: ~datetime.datetime or str - :keyword start: + :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: + :type start: ~datetime.datetime or str + :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. @@ -245,10 +270,12 @@ def generate_directory_sas( account_name=account_name, container_name=file_system_name, blob_name=directory_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, + account_key=account_key, + user_delegation_key=user_delegation_key, permission=permission, expiry=expiry, + start=start, + ip=ip, **kwargs) @@ -257,34 +284,36 @@ def generate_file_sas( file_system_name, # type: str directory_name, # type: str file_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[BlobSasPermissions, str]] expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any ): # type: (...) -> str - """Generates a shared access signature for a file. + """Generates a shared access signature for a blob. - Use the returned signature with the credential parameter of any BDataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. :param str account_name: The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str file_name: - The name of the file. 
- :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. + :param str container_name: + The name of the container. + :param str blob_name: + The name of the blob. + :param str snapshot: + An optional blob snapshot ID. + :param str account_key: + The access key to generate the shared access signature. Either `account_key` or + `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account key, the user could pass in a user delegation key. A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey :param permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. @@ -292,7 +321,7 @@ def generate_file_sas( Required unless an id is given referencing a stored access policy which contains this field. This field must be omitted if it has been specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions + :type permission: str or ~azure.storage.blob.BlobSasPermissions :param expiry: The time at which the shared access signature becomes invalid. Required unless an id is given referencing a stored access policy @@ -301,14 +330,14 @@ def generate_file_sas( convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. :type expiry: ~datetime.datetime or str - :keyword start: + :param start: The time at which the shared access signature becomes valid. If omitted, start time for this call is assumed to be the time when the storage service receives the request. Azure will always convert values to UTC. If a date is passed in without timezone info, it is assumed to be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: + :type start: ~datetime.datetime or str + :param str ip: Specifies an IP address or a range of IP addresses from which to accept requests. If the IP address from which the request originates does not match the IP address or address range specified on the SAS token, the request is not authenticated. 
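
The four SAS helpers in this hunk now take explicit ``account_key``/``user_delegation_key``, ``start`` and ``ip`` arguments and forward them to the blob-package SAS generators. A minimal usage sketch against the signatures shown above; the package-level import, account name, and key are assumptions for illustration, not part of the patch::

    from datetime import datetime, timedelta

    # Assumed re-export at package level; the helpers themselves live in the
    # _shared_access_signature.py module shown in this hunk.
    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import (
        generate_account_sas,
        generate_file_system_sas,
    )

    expiry = datetime.utcnow() + timedelta(hours=1)

    # Account-level SAS; resource types and permissions may be passed as strings.
    account_sas = generate_account_sas(
        account_name="<account-name>",
        account_key="<account-key>",
        resource_types="sco",        # service, container, object
        permission="rl",             # read + list
        expiry=expiry,
        start=datetime.utcnow(),
        ip="168.1.5.60-168.1.5.70",
    )

    # File-system (container-level) SAS signed with the account key.
    fs_sas = generate_file_system_sas(
        account_name="<account-name>",
        file_system_name="my-filesystem",
        account_key="<account-key>",
        permission="rl",
        expiry=expiry,
    )
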
@@ -342,8 +371,10 @@ def generate_file_sas( account_name=account_name, container_name=file_system_name, blob_name=path, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, + account_key=account_key, + user_delegation_key=user_delegation_key, permission=permission, expiry=expiry, + start=start, + ip=ip, **kwargs) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_version.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_version.py similarity index 93% rename from azure/multiapi/storagev2/blob/v2019_12_12/_version.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/_version.py index c40634c..37b2923 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_version.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/_version.py @@ -4,4 +4,4 @@ # license information. # -------------------------------------------------------------------------- -VERSION = "12.4.0" +VERSION = "12.0.0b7" diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/__init__.py similarity index 91% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/__init__.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/__init__.py index c24dde8..ed8a01a 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/__init__.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/__init__.py @@ -4,7 +4,6 @@ # license information. # -------------------------------------------------------------------------- -from ._download_async import StorageStreamDownloader from .._shared.policies_async import ExponentialRetry, LinearRetry from ._data_lake_file_client_async import DataLakeFileClient from ._data_lake_directory_client_async import DataLakeDirectoryClient @@ -20,5 +19,4 @@ 'DataLakeLeaseClient', 'ExponentialRetry', 'LinearRetry', - 'StorageStreamDownloader' ] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_directory_client_async.py similarity index 85% rename from azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_directory_client_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_directory_client_async.py index 8d4eb3e..80b01c4 100644 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_directory_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_directory_client_async.py @@ -3,8 +3,6 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - from ._data_lake_file_client_async import DataLakeFileClient from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase from .._models import DirectoryProperties @@ -39,12 +37,19 @@ class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] + .. 
literalinclude:: ../samples/test_datalake_authentication_samples.py + :start-after: [START create_datalake_service_client] + :end-before: [END create_datalake_service_client] + :language: python + :dedent: 8 + :caption: Creating the DataLakeServiceClient with account url and credential. + + .. literalinclude:: ../samples/test_datalake_authentication_samples.py + :start-after: [START create_datalake_service_client_oauth] + :end-before: [END create_datalake_service_client_oauth] :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. + :dedent: 8 + :caption: Creating the DataLakeServiceClient with Azure Identity credentials. """ def __init__( @@ -58,21 +63,22 @@ def __init__( super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call credential=credential, **kwargs) - async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] + async def create_directory(self, content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] **kwargs): # type: (...) -> Dict[str, Union[str, datetime]] """ Create a new directory. + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the directory as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. :keyword lease: - Required if the directory has an active lease. Value can be a DataLakeLeaseClient object + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, @@ -108,17 +114,8 @@ async def create_directory(self, metadata=None, # type: Optional[Dict[str, str] :keyword int timeout: The timeout parameter is expressed in seconds. :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. """ - return await self._create('directory', metadata=metadata, **kwargs) + return await self._create('directory', content_settings=content_settings, metadata=metadata, **kwargs) async def delete_directory(self, **kwargs): # type: (...) -> None @@ -126,9 +123,9 @@ async def delete_directory(self, **kwargs): Marks the specified directory for deletion. :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. 
@@ -149,15 +146,6 @@ async def delete_directory(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. """ return await self._delete(**kwargs) @@ -169,7 +157,7 @@ async def get_directory_properties(self, **kwargs): :keyword lease: Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -193,34 +181,51 @@ async def get_directory_properties(self, **kwargs): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] + .. literalinclude:: ../tests/test_blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] :language: python - :dedent: 4 + :dedent: 8 :caption: Getting the properties for a file/directory. """ blob_properties = await self._get_path_properties(**kwargs) return DirectoryProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access - async def rename_directory(self, new_name, # type: str - **kwargs): + async def rename_directory(self, rename_destination, **kwargs): # type: (**Any) -> DataLakeDirectoryClient """ Rename the source directory. - :param str new_name: + :param str rename_destination: the new directory name the user want to rename to. The value must have the following format: "{filesystem}/{directory}/{subdirectory}". :keyword source_lease: A lease ID for the source path. If specified, the source path must have an active lease and the leaase ID must match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. 
Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -258,19 +263,10 @@ async def rename_directory(self, new_name, # type: str :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - path = new_name[len(new_file_system):] + rename_destination = rename_destination.strip('/') + new_file_system = rename_destination.split('/')[0] + path = rename_destination[len(new_file_system):] new_directory_client = DataLakeDirectoryClient( self.url, new_file_system, directory_name=path, credential=self._raw_credential, @@ -283,6 +279,7 @@ async def rename_directory(self, new_name, # type: str return new_directory_client async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + content_settings=None, # type: Optional[ContentSettings] metadata=None, # type: Optional[Dict[str, str]] **kwargs): # type: (...) -> DataLakeDirectoryClient @@ -293,15 +290,14 @@ async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProp The directory with which to interact. This can either be the name of the directory, or an instance of DirectoryProperties. :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, @@ -339,7 +335,7 @@ async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProp :return: DataLakeDirectoryClient for the subdirectory. 
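
The directory-level operations above (``create_directory``, ``create_sub_directory``, and ``rename_directory`` with its ``"{filesystem}/{directory}"`` destination format) compose roughly as follows. This is an illustrative sketch only; the import path, endpoint, names, and key are placeholders/assumptions::

    import asyncio

    # Assumed import path for the multiapi layout used in this patch.
    from azure.multiapi.storagev2.filedatalake.v2019_07_07.aio import DataLakeDirectoryClient

    async def main():
        client = DataLakeDirectoryClient(
            "https://<account-name>.dfs.core.windows.net",
            file_system_name="my-filesystem",
            directory_name="my-directory",
            credential="<account-key>",
        )
        await client.create_directory(metadata={"category": "test"})
        await client.create_sub_directory("logs")
        # rename_destination uses the "{filesystem}/{directory}" form described above
        renamed = await client.rename_directory("my-filesystem/renamed-directory")
        await renamed.delete_directory()

    asyncio.run(main())
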
""" subdir = self.get_sub_directory_client(sub_directory) - await subdir.create_directory(metadata=metadata, **kwargs) + await subdir.create_directory(content_settings=content_settings, metadata=metadata, **kwargs) return subdir async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] @@ -353,9 +349,9 @@ async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProp or an instance of DirectoryProperties. :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -394,12 +390,11 @@ async def create_file(self, file, # type: Union[FileProperties, str] :keyword ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set path properties. :keyword metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, @@ -452,7 +447,7 @@ def get_file_client(self, file # type: Union[FileProperties, str] or an instance of FileProperties. eg. directory/subdirectory/file :type file: str or ~azure.storage.filedatalake.FileProperties :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient + :rtype: ~azure.storage.filedatalake..DataLakeFileClient .. admonition:: Example: @@ -487,7 +482,7 @@ def get_sub_directory_client(self, sub_directory # type: Union[DirectoryPropert or an instance of DirectoryProperties. :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient + :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient .. admonition:: Example: diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_file_client_async.py similarity index 72% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_file_client_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_file_client_async.py index 98f96b1..af20ba6 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_file_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_file_client_async.py @@ -4,13 +4,11 @@ # license information. 
# -------------------------------------------------------------------------- -from ._download_async import StorageStreamDownloader from ._path_client_async import PathClient from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase from .._deserialize import process_storage_error from .._generated.models import StorageErrorException from .._models import FileProperties -from ..aio._upload_helper import upload_datalake_file class DataLakeFileClient(PathClient, DataLakeFileClientBase): @@ -39,12 +37,19 @@ class DataLakeFileClient(PathClient, DataLakeFileClientBase): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] + .. literalinclude:: ../samples/test_datalake_authentication_samples.py + :start-after: [START create_datalake_service_client] + :end-before: [END create_datalake_service_client] :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. + :dedent: 8 + :caption: Creating the DataLakeServiceClient with account url and credential. + + .. literalinclude:: ../samples/test_datalake_authentication_samples.py + :start-after: [START create_datalake_service_client_oauth] + :end-before: [END create_datalake_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the DataLakeServiceClient with Azure Identity credentials. """ def __init__( @@ -68,22 +73,19 @@ async def create_file(self, content_settings=None, # type: Optional[ContentSett :param ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace + :keyword str permissions: Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. @@ -109,15 +111,6 @@ async def create_file(self, content_settings=None, # type: Optional[ContentSett :keyword int timeout: The timeout parameter is expressed in seconds. :return: response dict (Etag and last modified). 
- - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. """ return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) @@ -127,9 +120,9 @@ async def delete_file(self, **kwargs): Marks the specified file for deletion. :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -150,15 +143,6 @@ async def delete_file(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. """ return await self._delete(**kwargs) @@ -170,7 +154,7 @@ async def get_file_properties(self, **kwargs): :keyword lease: Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -194,76 +178,16 @@ async def get_file_properties(self, **kwargs): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] + .. literalinclude:: ../tests/test_blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] :language: python - :dedent: 4 - :caption: Getting the properties for a file. + :dedent: 8 + :caption: Getting the properties for a file/directory. """ blob_properties = await self._get_path_properties(**kwargs) return FileProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access - async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. 
- :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return await upload_datalake_file(**options) - async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] offset, # type: int length=None, # type: Optional[int] @@ -280,21 +204,12 @@ async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[An with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that this MD5 hash is not stored with the - file. + blob. :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. 
""" options = self._append_data_options( data, @@ -321,8 +236,6 @@ async def flush_data(self, offset, # type: int specified position are written to the file when flush succeeds, but this optional parameter allows data after the flush position to be retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. :keyword bool close: Azure Storage Events allow applications to receive notifications when files change. When Azure Storage Events are enabled, a file changed event is raised. This event has a property @@ -355,15 +268,6 @@ async def flush_data(self, offset, # type: int :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 12 - :caption: Commit the previous appended data. """ options = self._flush_data_options( offset, @@ -373,11 +277,13 @@ async def flush_data(self, offset, # type: int except StorageErrorException as error: process_storage_error(error) - async def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. + async def read_file(self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + stream=None, # type: Optional[IO] + **kwargs): + # type: (...) -> Union[int, byte] + """Download a file from the service. Return the downloaded data in bytes or + write the downloaded data into user provided stream and return the written size. :param int offset: Start of byte range to use for downloading a section of the file. @@ -385,10 +291,12 @@ async def download_file(self, offset=None, length=None, **kwargs): :param int length: Number of bytes to read from the stream. This is optional, but should be supplied for optimal performance. + :param int stream: + User provided stream to write the downloaded data into. :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + If specified, download_blob only succeeds if the blob's lease is active + and matches this ID. Required if the blob has an active lease. + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -412,39 +320,53 @@ async def download_file(self, offset=None, length=None, **kwargs): The timeout parameter is expressed in seconds. This method may make multiple calls to the Azure service and the timeout will apply to each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader + :returns: downloaded data or the size of data written into the provided stream + :rtype: bytes or int .. admonition:: Example: - .. 
literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START read_file] - :end-before: [END read_file] + .. literalinclude:: ../tests/test_blob_samples_hello_world.py + :start-after: [START download_a_blob] + :end-before: [END download_a_blob] :language: python - :dedent: 4 - :caption: Return the downloaded data. + :dedent: 12 + :caption: Download a blob. """ downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) + if stream: + return await downloader.readinto(stream) + return await downloader.readall() - async def rename_file(self, new_name, # type: str - **kwargs): + async def rename_file(self, rename_destination, **kwargs): # type: (**Any) -> DataLakeFileClient """ Rename the source file. - :param str new_name: the new file name the user want to rename to. + :param str rename_destination: the new file name the user want to rename to. The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. :keyword source_lease: A lease ID for the source path. If specified, the source path must have an active lease and the leaase ID must match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :type permissions: str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. @@ -482,21 +404,11 @@ async def rename_file(self, new_name, # type: str The source match condition to use upon the etag. :keyword int timeout: The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. 
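
The file-level calls above (``create_file``, ``append_data``, ``flush_data``, and ``read_file``, which returns bytes or, when a ``stream`` is supplied, the number of bytes written) chain together roughly like this. Illustrative sketch only; the import path, endpoint, names, and key are assumptions::

    import asyncio

    # Assumed import path for the multiapi layout used in this patch.
    from azure.multiapi.storagev2.filedatalake.v2019_07_07.aio import DataLakeFileClient

    async def main():
        file_client = DataLakeFileClient(
            "https://<account-name>.dfs.core.windows.net",
            file_system_name="my-filesystem",
            file_path="my-directory/data.txt",
            credential="<account-key>",
        )
        data = b"hello, datalake"
        await file_client.create_file()
        await file_client.append_data(data, offset=0, length=len(data))
        # flush commits everything appended before the given position
        await file_client.flush_data(len(data))
        content = await file_client.read_file()
        assert content == data

    asyncio.run(main())
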
+ :return: """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - path = new_name[len(new_file_system):] + rename_destination = rename_destination.strip('/') + new_file_system = rename_destination.split('/')[0] + path = rename_destination[len(new_file_system):] new_directory_client = DataLakeFileClient( self.url, new_file_system, file_path=path, credential=self._raw_credential, diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_lease_async.py similarity index 92% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_lease_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_lease_async.py index 8e91b05..2d5194e 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_lease_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_lease_async.py @@ -8,7 +8,7 @@ Union, Optional, Any, TypeVar, TYPE_CHECKING ) -from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobLeaseClient +from azure.storage.blob.aio import BlobLeaseClient from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase @@ -35,8 +35,8 @@ class DataLakeLeaseClient(DataLakeLeaseClientBase): :param client: The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.aio.FileSystemClient or - ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient + :type client: ~azure.storage.filedatalake.FileSystemClient or + ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient :param str lease_id: A string representing the lease ID of an existing lease. This value does not need to be specified in order to acquire a new lease, or break one. @@ -72,8 +72,8 @@ async def acquire(self, lease_duration=-1, **kwargs): # type: (int, Optional[int], **Any) -> None """Requests a new lease. - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. :param int lease_duration: Specifies the duration of the lease, in seconds, or negative one @@ -109,9 +109,9 @@ async def renew(self, **kwargs): """Renews the lease. The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. When you renew a lease, the lease duration clock resets. :keyword ~datetime.datetime if_modified_since: @@ -143,8 +143,8 @@ async def release(self, **kwargs): """Release the lease. The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. + that associated with the container or blob. 
Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. @@ -175,7 +175,7 @@ async def change(self, proposed_lease_id, **kwargs): """Change the lease ID of an active lease. :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 + Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. @@ -203,12 +203,12 @@ async def change(self, proposed_lease_id, **kwargs): async def break_lease(self, lease_break_period=None, **kwargs): # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. + """Break the lease, if the container or blob has an active lease. Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; the request is not required to specify a matching lease ID. When a lease is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. + no lease operation except break and release can be performed on the container or blob. When a lease is successfully broken, the response indicates the interval in seconds until a new lease can be acquired. diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_service_client_async.py similarity index 76% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_service_client_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_service_client_async.py index b0577a2..b2089a7 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_data_lake_service_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_data_lake_service_client_async.py @@ -5,7 +5,7 @@ # -------------------------------------------------------------------------- from azure.core.paging import ItemPaged -from azure.multiapi.storagev2.blob.v2019_07_07.aio import BlobServiceClient +from azure.storage.blob.aio import BlobServiceClient from .._generated.aio import DataLakeStorageClient from .._shared.base_client_async import AsyncStorageAccountHostsMixin from ._file_system_client_async import FileSystemClient @@ -14,7 +14,7 @@ from ._data_lake_directory_client_async import DataLakeDirectoryClient from ._data_lake_file_client_async import DataLakeFileClient from ._models import FileSystemPropertiesPaged -from .._models import UserDelegationKey, LocationMode +from .._models import UserDelegationKey class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase): @@ -26,7 +26,8 @@ class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClient can also be retrieved using the `get_client` functions. :ivar str url: - The full endpoint URL to the datalake service endpoint. + The full endpoint URL to the datalake service endpoint. This could be either the + primary endpoint, or the secondary endpoint depending on the current `location_mode`. :ivar str primary_endpoint: The full primary endpoint URL. 
:ivar str primary_hostname: @@ -43,18 +44,18 @@ class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClient .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service_async.py + .. literalinclude:: ../samples/test_datalake_authentication_samples.py :start-after: [START create_datalake_service_client] :end-before: [END create_datalake_service_client] :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. + :dedent: 8 + :caption: Creating the DataLakeServiceClient with account url and credential. - .. literalinclude:: ../samples/datalake_samples_service_async.py + .. literalinclude:: ../samples/test_datalake_authentication_samples.py :start-after: [START create_datalake_service_client_oauth] :end-before: [END create_datalake_service_client_oauth] :language: python - :dedent: 4 + :dedent: 8 :caption: Creating the DataLakeServiceClient with Azure Identity credentials. """ @@ -71,22 +72,9 @@ def __init__( **kwargs ) self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access self._client = DataLakeStorageClient(self.url, None, None, pipeline=self._pipeline) self._loop = kwargs.get('loop', None) - async def __aexit__(self, *args): - await self._blob_service_client.close() - await super(DataLakeServiceClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_service_client.close() - await self.__aexit__() - async def get_user_delegation_key(self, key_start_time, # type: datetime key_expiry_time, # type: datetime **kwargs # type: Any @@ -104,21 +92,13 @@ async def get_user_delegation_key(self, key_start_time, # type: datetime The timeout parameter is expressed in seconds. :return: The user delegation key. :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. """ delegation_key = await self._blob_service_client.get_user_delegation_key( key_start_time=key_start_time, key_expiry_time=key_expiry_time, **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access + delegation_key._class_ = UserDelegationKey # pylint: disable=protected-access + return delegation_key def list_file_systems(self, name_starts_with=None, # type: Optional[str] include_metadata=None, # type: Optional[bool] @@ -145,11 +125,11 @@ def list_file_systems(self, name_starts_with=None, # type: Optional[str] .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START dsc_list_file_systems] + :end-before: [END dsc_list_file_systems] :language: python - :dedent: 8 + :dedent: 12 :caption: Listing the file systems in the datalake service. 
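
``get_user_delegation_key`` above pairs with the ``user_delegation_key`` parameter of the SAS helpers earlier in this patch. A hedged sketch, assuming an AAD-authenticated async ``DataLakeServiceClient`` and the same assumed package-level helper import as before::

    from datetime import datetime, timedelta

    # Assumed re-export; generate_file_system_sas is defined in the
    # _shared_access_signature.py module shown earlier in this patch.
    from azure.multiapi.storagev2.filedatalake.v2019_07_07 import generate_file_system_sas

    async def file_system_sas(service_client):
        """Return a user-delegation SAS for a file system. The service_client
        must be authenticated with an AAD credential, not an account key."""
        key = await service_client.get_user_delegation_key(
            key_start_time=datetime.utcnow(),
            key_expiry_time=datetime.utcnow() + timedelta(hours=1),
        )
        return generate_file_system_sas(
            account_name="<account-name>",
            file_system_name="my-filesystem",
            user_delegation_key=key,
            permission="rl",
            expiry=datetime.utcnow() + timedelta(hours=1),
        )
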
""" item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, @@ -169,8 +149,7 @@ async def create_file_system(self, file_system, # type: Union[FileSystemPropert be raised. This method returns a client with which to interact with the newly created file system. - :param str file_system: - The name of the file system to create. + :param str file_system: The name of the file system to create. :param metadata: A dict with name-value pairs to associate with the file system as metadata. Example: `{'Category':'test'}` @@ -184,11 +163,11 @@ async def create_file_system(self, file_system, # type: Union[FileSystemPropert .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START dsc_create_file_system] + :end-before: [END dsc_create_file_system] :language: python - :dedent: 8 + :dedent: 12 :caption: Creating a file system in the datalake service. """ file_system_client = self.get_file_system_client(file_system) @@ -207,11 +186,10 @@ async def delete_file_system(self, file_system, # type: Union[FileSystemPropert The file system to delete. This can either be the name of the file system, or an instance of FileSystemProperties. :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, delete_file_system only succeeds if the file system's lease is active and matches this ID. Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -235,11 +213,11 @@ async def delete_file_system(self, file_system, # type: Union[FileSystemPropert .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_delete_file_system] + :end-before: [END bsc_delete_file_system] :language: python - :dedent: 8 + :dedent: 12 :caption: Deleting a file system in the datalake service. """ file_system_client = self.get_file_system_client(file_system) @@ -258,25 +236,19 @@ def get_file_system_client(self, file_system # type: Union[FileSystemProperties or an instance of FileSystemProperties. :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient + :rtype: ~azure.storage.filedatalake.FileSystemClient .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_file_system_client] + :end-before: [END bsc_get_file_system_client] :language: python :dedent: 8 :caption: Getting the file system client to interact with a specific file system. 
""" - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - _configuration=self._config, - _pipeline=self._pipeline, _hosts=self._hosts, + return FileSystemClient(self.url, file_system, credential=self._raw_credential, _configuration=self._config, + _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) @@ -297,29 +269,21 @@ def get_directory_client(self, file_system, # type: Union[FileSystemProperties, or an instance of DirectoryProperties. :type directory: str or ~azure.storage.filedatalake.DirectoryProperties :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient + :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_directory_client] + :end-before: [END bsc_get_directory_client] :language: python - :dedent: 8 + :dedent: 12 :caption: Getting the directory client to interact with a specific directory. """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, + return DataLakeDirectoryClient(self.url, file_system, directory_name=directory, credential=self._raw_credential, _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function @@ -342,29 +306,25 @@ def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] or an instance of FileProperties. eg. directory/subdirectory/file :type file_path: str or ~azure.storage.filedatalake.FileProperties :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient + :rtype: ~azure.storage.filedatalake..DataLakeFileClient .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] + .. literalinclude:: ../samples/test_datalake_service_samples.py + :start-after: [START bsc_get_file_client] + :end-before: [END bsc_get_file_client] :language: python - :dedent: 8 + :dedent: 12 :caption: Getting the file client to interact with a specific file. 
""" - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system try: file_path = file_path.name except AttributeError: pass return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, + self.url, file_system, file_path=file_path, credential=self._raw_credential, _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, + _location_mode=self._location_mode, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_file_system_client_async.py similarity index 70% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_file_system_client_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_file_system_client_async.py index d9cb4e5..0761c42 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_file_system_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_file_system_client_async.py @@ -10,12 +10,10 @@ Union, Optional, Any, Dict, TYPE_CHECKING ) -from azure.core.tracing.decorator import distributed_trace - from azure.core.async_paging import AsyncItemPaged from azure.core.tracing.decorator_async import distributed_trace_async -from azure.multiapi.storagev2.blob.v2019_07_07.aio import ContainerClient +from azure.storage.blob.aio import ContainerClient from ._data_lake_file_client_async import DataLakeFileClient from ._data_lake_directory_client_async import DataLakeDirectoryClient @@ -25,9 +23,10 @@ from .._generated.aio import DataLakeStorageClient from .._shared.base_client_async import AsyncStorageAccountHostsMixin from .._shared.policies_async import ExponentialRetry -from .._models import FileSystemProperties, PublicAccess +from .._models import FileSystemProperties if TYPE_CHECKING: + from .._models import PublicAccess from datetime import datetime from .._models import ( # pylint: disable=unused-import ContentSettings) @@ -57,14 +56,21 @@ class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase): shared access key, or an instance of a TokenCredentials class from azure.identity. If the URL already has a SAS token, specifying an explicit credential will take priority. - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. + .. admonition:: Example: + + .. literalinclude:: ../samples/test_file_system_samples.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Get a FileSystemClient from an existing DataLakeServiceClient. + + .. literalinclude:: ../samples/test_file_system_samples.py + :start-after: [START create_file_system_client_sasurl] + :end-before: [END create_file_system_client_sasurl] + :language: python + :dedent: 8 + :caption: Creating the FileSystemClient client directly. 
""" def __init__( @@ -89,18 +95,6 @@ def __init__( self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline) self._loop = kwargs.get('loop', None) - async def __aexit__(self, *args): - await self._container_client.close() - await super(FileSystemClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._container_client.close() - await self.__aexit__() - @distributed_trace_async async def acquire_lease( self, lease_duration=-1, # type: int @@ -141,22 +135,21 @@ async def acquire_lease( :keyword int timeout: The timeout parameter is expressed in seconds. :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient + :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START acquire_lease_on_file_system] :end-before: [END acquire_lease_on_file_system] :language: python - :dedent: 12 + :dedent: 8 :caption: Acquiring a lease on the file_system. """ lease = DataLakeLeaseClient(self, lease_id=lease_id) - await lease.acquire(lease_duration=lease_duration, **kwargs) + lease.acquire(lease_duration=lease_duration, **kwargs) return lease - @distributed_trace_async async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] public_access=None, # type: Optional[PublicAccess] **kwargs): @@ -172,26 +165,25 @@ async def create_file_system(self, metadata=None, # type: Optional[Dict[str, st file system as metadata. Example: `{'Category':'test'}` :type metadata: dict(str, str) :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. + Possible values include: file system, file. :type public_access: ~azure.storage.filedatalake.PublicAccess :keyword int timeout: The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient + :rtype: ~azure.storage.filedatalake.FileSystemClient .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START create_file_system] :end-before: [END create_file_system] :language: python - :dedent: 16 + :dedent: 12 :caption: Creating a file system in the datalake service. """ return await self._container_client.create_container(metadata=metadata, public_access=public_access, **kwargs) - @distributed_trace_async async def delete_file_system(self, **kwargs): # type: (Any) -> None """Marks the specified file system for deletion. @@ -199,11 +191,10 @@ async def delete_file_system(self, **kwargs): The file system and any files contained within it are later deleted during garbage collection. If the file system is not found, a ResourceNotFoundError will be raised. - :keyword lease: + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, delete_file_system only succeeds if the file system's lease is active and matches this ID. Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. 
If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -227,25 +218,23 @@ async def delete_file_system(self, **kwargs): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START delete_file_system] :end-before: [END delete_file_system] :language: python - :dedent: 16 + :dedent: 12 :caption: Deleting a file system in the datalake service. """ await self._container_client.delete_container(**kwargs) - @distributed_trace_async async def get_file_system_properties(self, **kwargs): # type: (Any) -> FileSystemProperties """Returns all user-defined metadata and system properties for the specified file system. The data returned does not include the file system's list of paths. - :keyword lease: + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, get_file_system_properties only succeeds if the file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str :keyword int timeout: The timeout parameter is expressed in seconds. :return: Properties for the specified file system within a file system object. @@ -253,19 +242,18 @@ async def get_file_system_properties(self, **kwargs): .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START get_file_system_properties] :end-before: [END get_file_system_properties] :language: python - :dedent: 16 + :dedent: 12 :caption: Getting properties on the file system. """ container_properties = await self._container_client.get_container_properties(**kwargs) return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - @distributed_trace_async async def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] + self, metadata=None, # type: Optional[Dict[str, str]] **kwargs ): # type: (...) -> Dict[str, Union[str, datetime]] @@ -278,10 +266,9 @@ async def set_file_system_metadata( # type: ignore A dict containing name-value pairs to associate with the file system as metadata. Example: {'category':'test'} :type metadata: dict[str, str] - :keyword lease: + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, set_file_system_metadata only succeeds if the file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -305,78 +292,15 @@ async def set_file_system_metadata( # type: ignore .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START set_file_system_metadata] :end-before: [END set_file_system_metadata] :language: python - :dedent: 16 + :dedent: 12 :caption: Setting metadata on the container. """ return await self._container_client.set_container_metadata(metadata=metadata, **kwargs) - @distributed_trace_async - async def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) 
-> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return await self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - @distributed_trace_async - async def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, get_file_system_access_policy only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_policy = await self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - @distributed_trace def get_paths(self, path=None, # type: Optional[str] recursive=True, # type: Optional[bool] max_results=None, # type: Optional[int] @@ -388,19 +312,17 @@ def get_paths(self, path=None, # type: Optional[str] :param str path: Filters the results to return only paths under the specified path. - :param int max_results: - An optional value that specifies the maximum + :param int max_results: An optional value that specifies the maximum number of items to return per page. If omitted or greater than 5,000, the response will include up to 5,000 items per page. - :keyword upn: - Optional. 
Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. + :keyword upn: Optional. Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. :type upn: bool :keyword int timeout: The timeout parameter is expressed in seconds. @@ -409,12 +331,12 @@ def get_paths(self, path=None, # type: Optional[str] .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] + .. literalinclude:: ../tests/test_blob_samples_containers.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] :language: python - :dedent: 12 - :caption: List the blobs in the file system. + :dedent: 8 + :caption: List the blobs in the container. """ timeout = kwargs.pop('timeout', None) command = functools.partial( @@ -426,8 +348,8 @@ def get_paths(self, path=None, # type: Optional[str] command, recursive, path=path, max_results=max_results, page_iterator_class=PathPropertiesPaged, **kwargs) - @distributed_trace_async async def create_directory(self, directory, # type: Union[DirectoryProperties, str] + content_settings=None, # type: Optional[ContentSettings] metadata=None, # type: Optional[Dict[str, str]] **kwargs): # type: (...) -> DataLakeDirectoryClient @@ -438,30 +360,27 @@ async def create_directory(self, directory, # type: Union[DirectoryProperties, The directory with which to interact. This can either be the name of the directory, or an instance of DirectoryProperties. :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. 
When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -482,21 +401,11 @@ async def create_directory(self, directory, # type: Union[DirectoryProperties, :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Create directory in the file system. """ directory_client = self.get_directory_client(directory) - await directory_client.create_directory(metadata=metadata, **kwargs) + await directory_client.create_directory(content_settings=content_settings, metadata=metadata, **kwargs) return directory_client - @distributed_trace_async async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] **kwargs): # type: (...) -> DataLakeDirectoryClient @@ -508,9 +417,9 @@ async def delete_directory(self, directory, # type: Union[DirectoryProperties, or an instance of DirectoryProperties. :type directory: str or ~azure.storage.filedatalake.DirectoryProperties :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -531,21 +440,11 @@ async def delete_directory(self, directory, # type: Union[DirectoryProperties, :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Delete directory in the file system. """ directory_client = self.get_directory_client(directory) await directory_client.delete_directory(**kwargs) return directory_client - @distributed_trace_async async def create_file(self, file, # type: Union[FileProperties, str] **kwargs): # type: (...) -> DataLakeFileClient @@ -559,27 +458,24 @@ async def create_file(self, file, # type: Union[FileProperties, str] :param ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set path properties. :param metadata: - Name-value pairs associated with the file as metadata. + Name-value pairs associated with the blob as metadata. :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object + :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: + Required if the blob has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. + :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, the umask restricts the permissions of the file or directory to be created. The resulting permission is given by p & ^u, where p is the permission and u is the umask. For example, if p is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. + :keyword str permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -600,22 +496,13 @@ async def create_file(self, file, # type: Union[FileProperties, str] :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 12 - :caption: Create file in the file system. 
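The umask and permissions keywords documented above accept symbolic or 4-digit octal values; a minimal sketch, assuming an existing async `file_system_client` as in the earlier sketches and placeholder path names:

    # Inside an async function, with `file_system_client` from an earlier sketch.
    directory_client = await file_system_client.create_directory(
        "my-directory", permissions="rwxr-x---", umask="0027")
    file_client = await file_system_client.create_file("my-directory/report.txt")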
""" file_client = self.get_file_client(file) await file_client.create_file(**kwargs) return file_client - @distributed_trace_async async def delete_file(self, file, # type: Union[FileProperties, str] + lease=None, # type: Optional[Union[DataLakeLeaseClient, str]] **kwargs): # type: (...) -> DataLakeFileClient """ @@ -626,9 +513,9 @@ async def delete_file(self, file, # type: Union[FileProperties, str] or an instance of FileProperties. :type file: str or ~azure.storage.filedatalake.FileProperties :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object + Required if the blob has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.blob.LeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -649,27 +536,11 @@ async def delete_file(self, file, # type: Union[FileProperties, str] :keyword int timeout: The timeout parameter is expressed in seconds. :return: DataLakeFileClient - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 12 - :caption: Delete file in the file system. """ file_client = self.get_file_client(file) - await file_client.delete_file(**kwargs) + await file_client.delete_file(lease=lease, **kwargs) return file_client - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] ): # type: (...) -> DataLakeDirectoryClient @@ -682,26 +553,21 @@ def get_directory_client(self, directory # type: Union[DirectoryProperties, str or an instance of DirectoryProperties. :type directory: str or ~azure.storage.filedatalake.DirectoryProperties :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient + :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START get_directory_client_from_file_system] :end-before: [END get_directory_client_from_file_system] :language: python :dedent: 12 :caption: Getting the directory client to interact with a specific directory. """ - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, + return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory, credential=self._raw_credential, _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, @@ -720,11 +586,11 @@ def get_file_client(self, file_path # type: Union[FileProperties, str] or an instance of FileProperties. eg. 
directory/subdirectory/file :type file_path: str or ~azure.storage.filedatalake.FileProperties :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient + :rtype: ~azure.storage.filedatalake..DataLakeFileClient .. admonition:: Example: - .. literalinclude:: ../samples/datalake_samples_file_system_async.py + .. literalinclude:: ../samples/test_file_system_samples.py :start-after: [START get_file_client_from_file_system] :end-before: [END get_file_client_from_file_system] :language: python @@ -739,6 +605,6 @@ def get_file_client(self, file_path # type: Union[FileProperties, str] return DataLakeFileClient( self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, + _location_mode=self._location_mode, require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_models.py similarity index 98% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_models.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_models.py index ba143f6..39eeb29 100644 --- a/azure/multiapi/storagev2/filedatalake/v2018_11_09/aio/_models.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_models.py @@ -6,7 +6,7 @@ # pylint: disable=too-few-public-methods, too-many-instance-attributes # pylint: disable=super-init-not-called, too-many-lines from azure.core.async_paging import AsyncPageIterator -from azure.multiapi.storagev2.blob.v2019_07_07.aio._models import ContainerPropertiesPaged +from azure.storage.blob.aio._models import ContainerPropertiesPaged from .._deserialize import return_headers_and_deserialized_path_list, process_storage_error from .._generated.models import StorageErrorException, Path diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_path_client_async.py similarity index 88% rename from azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_path_client_async.py rename to azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_path_client_async.py index 2fa9d3f..ab5a8e8 100644 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_path_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/aio/_path_client_async.py @@ -3,8 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobClient +from azure.storage.blob.aio import BlobClient from .._shared.base_client_async import AsyncStorageAccountHostsMixin from .._path_client import PathClient as PathClientBase from .._models import DirectoryProperties @@ -40,18 +39,6 @@ def __init__( self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline) self._loop = kwargs.get('loop', None) - async def __aexit__(self, *args): - await self._blob_client.close() - await super(PathClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. 
- It need not be used when using with a context manager. - """ - await self._blob_client.close() - await self.__aexit__() - async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): # type: (...) -> Dict[str, Union[str, datetime]] """ @@ -68,9 +55,9 @@ async def _create(self, resource_type, content_settings=None, metadata=None, **k Name-value pairs associated with the file/directory as metadata. :type metadata: dict(str, str) :keyword lease: - Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object + Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. When creating a file or directory and the parent folder does not have a default ACL, @@ -126,14 +113,14 @@ async def _delete(self, **kwargs): :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: + :param ~datetime.datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. @@ -144,7 +131,7 @@ async def _delete(self, **kwargs): and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. - :keyword int timeout: + :param int timeout: The timeout parameter is expressed in seconds. :return: None """ @@ -189,7 +176,7 @@ async def set_access_control(self, owner=None, # type: Optional[str] :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -236,7 +223,7 @@ async def get_access_control(self, upn=None, # type: Optional[bool] :keyword lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. 
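A hedged sketch of the access-control calls described above, assuming an existing async `directory_client`; the exact shape of the returned mapping (a dict with a "permissions" entry) is an assumption, not something stated in the diff:

    # Set POSIX-style permissions, then read them back with user principal names resolved.
    await directory_client.set_access_control(permissions="rwxr-x---")
    access = await directory_client.get_access_control(upn=True)
    print(access.get("permissions"))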
@@ -272,23 +259,37 @@ async def _rename_path(self, rename_source, :param rename_source: The value must have the following format: "/{filesystem}/{path}". :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, + :param source_lease: A lease ID for the source path. If specified, the source path must have an active lease and the leaase ID must match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: + :type source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :param lease: Required if the file/directory has an active lease. Value can be a LeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :param str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :param permissions: Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :type permissions: str + :param ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: + :param ~datetime.datetime if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. @@ -299,13 +300,13 @@ async def _rename_path(self, rename_source, and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions match_condition: The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: + :param ~datetime.datetime source_if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime source_if_unmodified_since: + :param ~datetime.datetime source_if_unmodified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. If a date is passed in without timezone info, it is assumed to be UTC. @@ -316,8 +317,9 @@ async def _rename_path(self, rename_source, and act according to the condition specified by the `match_condition` parameter. :keyword ~azure.core.MatchConditions source_match_condition: The source match condition to use upon the etag. - :keyword int timeout: + :param int timeout: The timeout parameter is expressed in seconds. + :return: """ options = self._rename_path_options( rename_source, @@ -335,7 +337,7 @@ async def _get_path_properties(self, **kwargs): :keyword lease: Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -356,12 +358,21 @@ async def _get_path_properties(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: DirectoryProperties or FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../tests/test_blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 8 + :caption: Getting the properties for a file/directory. """ path_properties = await self._blob_client.get_blob_properties(**kwargs) path_properties.__class__ = DirectoryProperties return path_properties - async def set_metadata(self, metadata, # type: Dict[str, str] + async def set_metadata(self, metadata=None, # type: Optional[Dict[str, str]] **kwargs): # type: (...) -> Dict[str, Union[str, datetime]] """Sets one or more user-defined name-value pairs for the specified @@ -373,10 +384,9 @@ async def set_metadata(self, metadata, # type: Dict[str, str] A dict containing name-value pairs to associate with the file system as metadata. Example: {'category':'test'} :type metadata: dict[str, str] - :keyword lease: + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, set_file_system_metadata only succeeds if the file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -397,6 +407,15 @@ async def set_metadata(self, metadata, # type: Dict[str, str] :keyword int timeout: The timeout parameter is expressed in seconds. :returns: file system-updated property dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/test_file_system_samples.py + :start-after: [START set_file_system_metadata] + :end-before: [END set_file_system_metadata] + :language: python + :dedent: 12 + :caption: Setting metadata on the container. 
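A short sketch of set_metadata as documented above, assuming an existing async `file_client` (or directory client); the metadata keys and values are placeholders:

    # Inside an async function, with `file_client` from an earlier sketch.
    await file_client.set_metadata({"category": "test"})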
""" return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) @@ -409,10 +428,9 @@ async def set_http_headers(self, content_settings=None, # type: Optional[Conten :param ~azure.storage.filedatalake.ContentSettings content_settings: ContentSettings object used to set file/directory properties. - :keyword lease: + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: If specified, set_file_system_metadata only succeeds if the file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str :keyword ~datetime.datetime if_modified_since: A DateTime value. Azure expects the date value passed in to be UTC. If timezone is included, any non-UTC datetimes will be converted to UTC. @@ -474,7 +492,7 @@ async def acquire_lease(self, lease_duration=-1, # type: Optional[int] :keyword int timeout: The timeout parameter is expressed in seconds. :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient + :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient .. admonition:: Example: diff --git a/azure/multiapi/storagev2/filedatalake/v2019_07_07/mypy.ini b/azure/multiapi/storagev2/filedatalake/v2019_07_07/mypy.ini new file mode 100644 index 0000000..a3cb6d2 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2019_07_07/mypy.ini @@ -0,0 +1,14 @@ +[mypy] +python_version = 3.6 +warn_return_any = True +warn_unused_configs = True +ignore_missing_imports = True + +# Per-module options: + +[mypy-azure.storage.filedatalake._generated.*] +ignore_errors = True + +[mypy-azure.core.*] +ignore_errors = True + diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/__init__.py deleted file mode 100644 index a86368c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/__init__.py +++ /dev/null @@ -1,79 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from ._download import StorageStreamDownloader -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._file_system_client import FileSystemClient -from ._data_lake_service_client import DataLakeServiceClient -from ._data_lake_lease import DataLakeLeaseClient -from ._models import ( - LocationMode, - ResourceTypes, - FileSystemProperties, - FileSystemPropertiesPaged, - DirectoryProperties, - FileProperties, - PathProperties, - PathPropertiesPaged, - LeaseProperties, - ContentSettings, - AccountSasPermissions, - FileSystemSasPermissions, - DirectorySasPermissions, - FileSasPermissions, - UserDelegationKey, - PublicAccess, - AccessPolicy, - DelimitedTextDialect, - DelimitedJsonDialect, - DataLakeFileQueryError -) -from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \ - generate_file_sas - -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import StorageErrorCode -from ._version import VERSION - -__version__ = VERSION - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeFileClient', - 'DataLakeDirectoryClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'PublicAccess', - 'AccessPolicy', - 'ResourceTypes', - 'StorageErrorCode', - 'UserDelegationKey', - 'FileSystemProperties', - 'FileSystemPropertiesPaged', - 'DirectoryProperties', - 'FileProperties', - 'PathProperties', - 'PathPropertiesPaged', - 'LeaseProperties', - 'ContentSettings', - 'AccountSasPermissions', - 'FileSystemSasPermissions', - 'DirectorySasPermissions', - 'FileSasPermissions', - 'generate_account_sas', - 'generate_file_system_sas', - 'generate_directory_sas', - 'generate_file_sas', - 'VERSION', - 'StorageStreamDownloader', - 'DelimitedTextDialect', - 'DelimitedJsonDialect', - 'DataLakeFileQueryError' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_directory_client.py deleted file mode 100644 index 90c525b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_directory_client.py +++ /dev/null @@ -1,523 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from ._shared.base_client import parse_connection_str -from ._data_lake_file_client import DataLakeFileClient -from ._models import DirectoryProperties -from ._path_client import PathClient - - -class DataLakeDirectoryClient(PathClient): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. 
- :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> DataLakeDirectoryClient - """ - Create DataLakeDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: - The name of file system to interact with. - :type file_system_name: str - :param directory_name: - The name of directory to interact with. The directory is under file system. - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, and account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeDirectoryClient - :rtype ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, directory_name=directory_name, - credential=credential, **kwargs) - - def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. 
- The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return self._create('directory', metadata=metadata, **kwargs) - - def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
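The umask rule quoted above (effective permission is p & ^u) can be checked directly, and the same keywords apply when creating a directory. A hedged sketch, assuming a directory_client built as in the earlier snippet; metadata values and modes are illustrative::

    # Worked umask example from the docstring: p = 0777 and u = 0057 give 0720.
    assert 0o777 & ~0o057 == 0o720

    # directory_client: a DataLakeDirectoryClient, constructed as sketched earlier.
    response = directory_client.create_directory(
        metadata={"owner": "data-team"},   # optional name-value pairs
        permissions="rwxr-x---",           # symbolic or 4-digit octal notation
        umask="0027")
    # response is the dict of ETag / last-modified values described above.

    directory_client.delete_directory()    # marks the directory for deletion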
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return self._delete(**kwargs) - - def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - blob_properties = self._get_path_properties(**kwargs) - return DirectoryProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access - - def rename_directory(self, new_name, # type: str - **kwargs): - # type: (**Any) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - path = new_name[len(new_file_system):] - - new_directory_client = DataLakeDirectoryClient( - self.url, new_file_system, directory_name=path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - new_directory_client._rename_path('/'+self.file_system_name+'/'+self.path_name, # pylint: disable=protected-access - **kwargs) - return new_directory_client - - def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. 
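Given the rename implementation just shown, the new name must start with the target file system segment, and the call returns a client bound to the new path. A small sketch with placeholder names::

    # Renames the current directory into "archive/child" within the same file system.
    renamed_client = directory_client.rename_directory("my-file-system/archive/child")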
- :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.delete_directory(**kwargs) - return subdir - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake..DataLakeFileClient - """ - try: - file_path = file.name - except AttributeError: - file_path = self.path_name + '/' + file - - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The sub subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - try: - subdir_path = sub_directory.name - except AttributeError: - subdir_path = self.path_name + '/' + sub_directory - - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_file_client.py deleted file mode 100644 index db076b8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_file_client.py +++ /dev/null @@ -1,708 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from io import BytesIO -import six - -from ._quick_query_helper import DataLakeFileQueryReader -from ._shared.base_client import parse_connection_str -from ._shared.request_handlers import get_length, read_length -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import IterStreamer -from ._upload_helper import upload_datalake_file -from ._generated.models import StorageErrorException -from ._download import StorageStreamDownloader -from ._path_client import PathClient -from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers -from ._deserialize import process_storage_error -from ._models import FileProperties, DataLakeFileQueryError - - -class DataLakeFileClient(PathClient): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> DataLakeFileClient - """ - Create DataLakeFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param directory_name: The name of directory to interact with. The directory is under file system. - :type directory_name: str - :param file_name: The name of file to interact with. The file is under directory. - :type file_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, and account shared access - key, or an instance of a TokenCredentials class from azure.identity. 
- Credentials provided here will take precedence over those in the connection string. - :return a DataLakeFileClient - :rtype ~azure.storage.filedatalake.DataLakeFileClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, file_path=file_path, - credential=credential, **kwargs) - - def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. 
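A short creation sketch for the file client documented above; the account details and paths are placeholders, and ContentSettings is the model exported by the same package::

    from azure.multiapi.storagev2.filedatalake.v2019_12_12 import (
        ContentSettings,
        DataLakeFileClient,
    )

    file_client = DataLakeFileClient(
        "https://myaccount.dfs.core.windows.net",      # placeholder account URL
        file_system_name="my-file-system",
        file_path="raw/2019/readings.csv",
        credential="<account-key-or-sas-token>")

    file_client.create_file(
        content_settings=ContentSettings(content_type="text/csv"),
        metadata={"source": "sensor-7"})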
- """ - return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return self._delete(**kwargs) - - def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - blob_properties = self._get_path_properties(**kwargs) - return FileProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access - - def _upload_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - - kwargs['properties'] = add_metadata_headers(metadata) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_mod_conditions(kwargs) - - if content_settings: - kwargs['path_http_headers'] = get_path_http_headers(content_settings) - - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['validate_content'] = validate_content - kwargs['max_concurrency'] = max_concurrency - kwargs['client'] = self._client.path - - return kwargs - - def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. 
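upload_data accepts bytes, text, or a stream, and its chunk_size keyword caps each transfer. A hedged sketch, assuming a file_client constructed as above and a local readings.csv; the 4 MiB chunk size is only an illustration::

    with open("readings.csv", "rb") as source:
        file_client.upload_data(
            source,
            overwrite=True,                 # replace any existing file content
            chunk_size=4 * 1024 * 1024)     # smaller than the 100MB default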
- Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return upload_datalake_file(**options) - - @staticmethod - def _append_data_options(data, offset, length=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'body': data, - 'position': offset, - 'content_length': length, - 'lease_access_conditions': access_conditions, - 'validate_content': kwargs.pop('validate_content', False), - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :return: dict of the response header - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data, - offset, - length=length, - **kwargs) - try: - return self._client.path.append_data(**options) - except StorageErrorException as error: - process_storage_error(error) - - @staticmethod - def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'position': offset, - 'content_length': 0, - 'path_http_headers': path_http_headers, - 'retain_uncommitted_data': retain_uncommitted_data, - 'close': kwargs.pop('close', False), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. - - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
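append_data stages bytes at an offset and flush_data commits them once the final length is known, which is the pattern these two methods describe. A minimal sketch, assuming an existing file_client::

    payload = b"hello, datalake"
    file_client.create_file()
    file_client.append_data(payload, offset=0, length=len(payload))
    # Flush at the total length so everything appended so far is committed.
    file_client.flush_data(len(payload))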
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 8 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return self._client.path.flush_data(**options) - except StorageErrorException as error: - process_storage_error(error) - - def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.StorageStreamDownloader - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - def rename_file(self, new_name, # type: str - **kwargs): - # type: (**Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - path = new_name[len(new_file_system):] - - new_directory_client = DataLakeFileClient( - self.url, new_file_system, file_path=path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - new_directory_client._rename_path('/'+self.file_system_name+'/'+self.path_name, # pylint: disable=protected-access - **kwargs) - return new_directory_client - - def query_file(self, query_expression, **kwargs): - # type: (str, **Any) -> DataLakeFileQueryReader - """Enables users to select/project on datalake file data by providing simple query expressions. - This operations returns a DataLakeFileQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - eg. Select * from DataLakeStorage - :keyword Callable[Exception] on_error: - A function to be called on any processing errors returned by the service. - :keyword file_format: - Optional. Defines the serialization of the data currently stored in the file. The default is to - treat the file data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. - :paramtype file_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the file. By providing an output format, the file data will be reformatted - according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. - :paramtype output_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
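query_file takes a SQL-like expression against the DataLakeStorage alias and returns a reader whose readall()/readinto() yield the projected data. A hedged sketch; the dialect keyword names and the _2 column reference are illustrative assumptions::

    from azure.multiapi.storagev2.filedatalake.v2019_12_12 import DelimitedTextDialect

    input_format = DelimitedTextDialect(delimiter=",", has_header=True)
    reader = file_client.query_file(
        "SELECT * from DataLakeStorage WHERE _2 > 100",
        file_format=input_format)
    print(reader.readall())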
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (DataLakeFileQueryReader) - :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on datalake file data by providing simple query expressions. - """ - query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage") - blob_quick_query_reader = self._blob_client.query_blob(query_expression, - blob_format=kwargs.pop('file_format', None), - error_cls=DataLakeFileQueryError, - **kwargs) - return DataLakeFileQueryReader(blob_quick_query_reader) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_lease.py deleted file mode 100644 index d896ccb..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_lease.py +++ /dev/null @@ -1,245 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2019_12_12 import BlobLeaseClient - - -if TYPE_CHECKING: - from datetime import datetime - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(object): - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.FileSystemClient or - ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. 
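A lease sketch built from the constructor and the acquire/release methods in this file; the duration and payload are placeholders, and the lease keyword is the one the file-level operations above accept::

    from azure.multiapi.storagev2.filedatalake.v2019_12_12 import DataLakeLeaseClient

    lease = DataLakeLeaseClient(file_client)
    lease.acquire(lease_duration=15)      # 15-60 seconds, or -1 for an infinite lease
    try:
        payload = b"guarded write"
        file_client.append_data(payload, offset=0, length=len(payload), lease=lease)
        file_client.flush_data(len(payload), lease=lease)
    finally:
        lease.release()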
- """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) - - def _update_lease_client_attributes(self): - self.id = self._blob_lease_client.id # type: str - self.last_modified = self._blob_lease_client.last_modified # type: datetime - self.etag = self._blob_lease_client.etag # type: str diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_service_client.py deleted file mode 100644 index acb8ca1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_data_lake_service_client.py +++ /dev/null @@ -1,421 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged - -from azure.multiapi.storagev2.blob.v2019_12_12 import BlobServiceClient -from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str -from ._file_system_client import FileSystemClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_file_client import DataLakeFileClient -from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode -from ._serialize import convert_dfs_url_to_blob_url - - -class DataLakeServiceClient(StorageAccountHostsMixin): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - - _, sas_token = parse_query(parsed_url.query) - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs', - credential=self._raw_credential, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - - def __exit__(self, *args): - self._blob_service_client.close() - super(DataLakeServiceClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._blob_service_client.close() - self.__exit__() - - def _format_url(self, hostname): - """Format the endpoint URL according to hostname - """ - formated_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str) - return formated_url - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> DataLakeServiceClient - """ - Create DataLakeServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, and account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeServiceClient - :rtype ~azure.storage.filedatalake.DataLakeServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_data_lake_service_client_from_conn_str] - :end-before: [END create_data_lake_service_client_from_conn_str] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from a connection string. - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls(account_url, credential=credential, **kwargs) - - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. 
admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. - """ - delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. - """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. 
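A hedged sketch of the service-level flow documented above, creating a file system and then listing file systems; the connection string and names are placeholders, and the upstream ``azure.storage.filedatalake`` import stands in for this vendored module::

    from azure.storage.filedatalake import DataLakeServiceClient

    # Placeholder connection string; substitute a real one.
    service_client = DataLakeServiceClient.from_connection_string("<connection-string>")

    # create_file_system returns a FileSystemClient for the newly created file system.
    file_system_client = service_client.create_file_system("example-filesystem")

    # list_file_systems lazily follows continuation tokens across result pages.
    for fs in service_client.list_file_systems(name_starts_with="example-"):
        print(fs.name)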
- """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. 
- """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - _configuration=self._config, - _pipeline=self._pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake..DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. 
- """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_deserialize.py deleted file mode 100644 index 9d0881a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_deserialize.py +++ /dev/null @@ -1,106 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -from typing import ( # pylint: disable=unused-import - TYPE_CHECKING -) - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \ - ResourceNotFoundError, ResourceExistsError -from ._shared.models import StorageErrorCode - -if TYPE_CHECKING: - pass - -_LOGGER = logging.getLogger(__name__) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = value - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_headers_and_deserialized_path_list(response, deserialized, response_headers): # pylint: disable=unused-argument - return deserialized.paths if deserialized.paths else {}, normalize_headers(response_headers) - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body: - if info == 'code': - error_code = error_body[info] - elif info == 'message': - error_message = error_body[info] - else: - additional_data[info] = error_body[info] - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.invalid_property_name, - StorageErrorCode.invalid_source_uri, - StorageErrorCode.source_path_not_found, - StorageErrorCode.lease_name_mismatch, - StorageErrorCode.file_system_not_found, - StorageErrorCode.path_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.invalid_destination_path, - StorageErrorCode.invalid_rename_source_path, - 
StorageErrorCode.lease_is_already_broken, - StorageErrorCode.invalid_source_or_destination_resource_type, - StorageErrorCode.rename_destination_parent_path_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.source_path_is_being_deleted, - StorageErrorCode.path_already_exists, - StorageErrorCode.destination_path_is_being_deleted, - StorageErrorCode.file_system_already_exists, - StorageErrorCode.file_system_being_deleted, - StorageErrorCode.path_conflict]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - raise error diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_download.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_download.py deleted file mode 100644 index 181b503..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_download.py +++ /dev/null @@ -1,53 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._models import FileProperties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = FileProperties._from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - return self._downloader.chunks() - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return self._downloader.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. 
- :rtype: int - """ - return self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_file_system_client.py deleted file mode 100644 index 51e6cbd..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_file_system_client.py +++ /dev/null @@ -1,782 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import functools - -try: - from urllib.parse import urlparse, quote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote # type: ignore - -import six -from azure.core.paging import ItemPaged -from azure.multiapi.storagev2.blob.v2019_12_12 import ContainerClient -from ._shared.base_client import StorageAccountHostsMixin, parse_query, parse_connection_str -from ._serialize import convert_dfs_url_to_blob_url -from ._models import LocationMode, FileSystemProperties, PathPropertiesPaged, PublicAccess -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_lease import DataLakeLeaseClient -from ._generated import DataLakeStorageClient - - -class FileSystemClient(StorageAccountHostsMixin): - """A client to interact with a specific file system, even if that file system - may not yet exist. - - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not file_system_name: - raise ValueError("Please specify a file system name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - # TODO: add self.account_url to base_client and remove _blob_account_url - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._container_client = ContainerClient(blob_account_url, file_system_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline) - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - self._query_str) - - def __exit__(self, *args): - self._container_client.close() - super(FileSystemClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._container_client.close() - self.__exit__() - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> FileSystemClient - """ - Create FileSystemClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, and account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a FileSystemClient - :rtype ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_connection_string] - :end-before: [END create_file_system_client_from_connection_string] - :language: python - :dedent: 8 - :caption: Create FileSystemClient from connection string - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, credential=credential, **kwargs) - - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. 
Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 12 - :caption: Creating a file system in the datalake service. - """ - return self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 12 - :caption: Deleting a file system in the datalake service. - """ - self._container_client.delete_container(**kwargs) - - def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the file system. - """ - container_properties = self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the file system. - """ - return self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. 
- :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File System-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, the operation only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_policy = self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> ItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. 
Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 8 - :caption: List the paths in the file system. - """ - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.file_system.list_paths, - path=path, - timeout=timeout, - **kwargs) - return ItemPaged( - command, recursive, path=path, max_results=max_results, - page_iterator_class=PathPropertiesPaged, **kwargs) - - def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.delete_directory(**kwargs) - return directory_client - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. 
- :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 8 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 8 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - file_client.delete_file(**kwargs) - return file_client - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. 
This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake..DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file_path.name - except AttributeError: - pass - - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/__init__.py deleted file mode 100644 index 2c90133..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._data_lake_storage_client import DataLakeStorageClient -__all__ = ['DataLakeStorageClient'] - -from .version import VERSION - -__version__ = VERSION - diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_configuration.py deleted file mode 100644 index 5fc3466..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_configuration.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from .version import VERSION - - -class DataLakeStorageClientConfiguration(Configuration): - """Configuration for DataLakeStorageClient - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :param file_system: The filesystem identifier. - :type file_system: str - :param path1: The file or directory path. - :type path1: str - :ivar resource: The value must be "filesystem" for all filesystem - operations. 
- :type resource: str - :ivar version: Specifies the version of the operation to use for this - request. - :type version: str - """ - - def __init__(self, url, file_system, path1, **kwargs): - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - if file_system is None: - raise ValueError("Parameter 'file_system' must not be None.") - - super(DataLakeStorageClientConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION)) - self.generate_client_request_id = True - - self.url = url - self.file_system = file_system - self.path1 = path1 - self.resource = "filesystem" - self.version = "2019-12-12" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/__init__.py deleted file mode 100644 index 5f09159..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._data_lake_storage_client_async import DataLakeStorageClient -__all__ = ['DataLakeStorageClient'] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_configuration_async.py deleted file mode 100644 index 5aaa28b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_configuration_async.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from ..version import VERSION - - -class DataLakeStorageClientConfiguration(Configuration): - """Configuration for DataLakeStorageClient - Note that all parameters used to create this instance are saved as instance - attributes. 
- - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :param file_system: The filesystem identifier. - :type file_system: str - :param path1: The file or directory path. - :type path1: str - :ivar resource: The value must be "filesystem" for all filesystem - operations. - :type resource: str - :ivar version: Specifies the version of the operation to use for this - request. - :type version: str - """ - - def __init__(self, url, file_system, path1, **kwargs): - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(DataLakeStorageClientConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION)) - self.generate_client_request_id = True - self.accept_language = None - - self.url = url - self.file_system = file_system - self.path1 = path1 - self.resource = "filesystem" - self.version = "2019-12-12" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_data_lake_storage_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_data_lake_storage_client_async.py deleted file mode 100644 index 929fece..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/_data_lake_storage_client_async.py +++ /dev/null @@ -1,68 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import AsyncPipelineClient -from msrest import Serializer, Deserializer - -from ._configuration_async import DataLakeStorageClientConfiguration -from azure.core.exceptions import map_error -from .operations_async import ServiceOperations -from .operations_async import FileSystemOperations -from .operations_async import PathOperations -from .. import models - - -class DataLakeStorageClient(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. 
- - - :ivar service: Service operations - :vartype service: azure.storage.filedatalake.aio.operations_async.ServiceOperations - :ivar file_system: FileSystem operations - :vartype file_system: azure.storage.filedatalake.aio.operations_async.FileSystemOperations - :ivar path: Path operations - :vartype path: azure.storage.filedatalake.aio.operations_async.PathOperations - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :param file_system: The filesystem identifier. - :type file_system: str - :param path1: The file or directory path. - :type path1: str - """ - - def __init__( - self, url, file_system, path1, **kwargs): - - base_url = '{url}' - self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-12-12' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( - self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self): - await self._client.close() - async def __aenter__(self): - await self._client.__aenter__() - return self - async def __aexit__(self, *exc_details): - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_file_system_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_file_system_operations_async.py deleted file mode 100644 index f1af068..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_file_system_operations_async.py +++ /dev/null @@ -1,462 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class FileSystemOperations: - """FileSystemOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - async def create(self, properties=None, request_id=None, timeout=None, *, cls=None, **kwargs): - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem - already exists, the operation fails. 
This operation does not support - conditional HTTP requests. - - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}'} - - async def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, *, 
cls=None, **kwargs): - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional - HTTP requests. For more information, see [Specifying Conditional - Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) 
- response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{filesystem}'} - - async def get_properties(self, request_id=None, timeout=None, *, cls=None, **kwargs): - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the - response headers. - - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-namespace-enabled': self._deserialize('str', 
response.headers.get('x-ms-namespace-enabled')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{filesystem}'} - - async def delete(self, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a - FileSystem with the same identifier cannot be created for at least 30 - seconds. While the filesystem is being deleted, attempts to create a - filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information - indicating that the filesystem is being deleted. All other operations, - including operations on any files or directories within the filesystem, - will fail with status code 404 (Not Found) while the filesystem is - being deleted. This operation supports conditional HTTP requests. For - more information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}'} - - async def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, *, cls=None, **kwargs): - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param path: Optional. Filters results to paths within the specified - directory. An error occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum - number of items to return. If omitted or greater than 5,000, the - response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: PathList or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.PathList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.list_paths.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PathList', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_paths.metadata = {'url': '/{filesystem}'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_path_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_path_operations_async.py deleted file mode 100644 index 0e8a109..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_path_operations_async.py +++ /dev/null @@ -1,1600 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) 
Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class PathOperations: - """PathOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - async def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is - overwritten and if the destination already exists and has a lease the - lease is broken. This operation supports conditional HTTP requests. - For more information, see [Specifying Conditional Headers for Blob - Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param resource: Required only for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource: str or - ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This - parameter determines the behavior of the rename operation. The value - must be "legacy" or "posix", and the default value will be "posix". - Possible values include: 'legacy', 'posix' - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. - The value must have the following format: "/{filesystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. This value must be a URL percent-encoded string. Note that - the string may only contain ASCII characters in the ISO-8859-1 - character set. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. 
If specified, - the source path must have an active lease and the lease ID must - match. - :type source_lease_id: str - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is - enabled for the account. When creating a file or directory and the - parent folder does not have a default ACL, the umask restricts the - permissions of the file or directory to be created. The resulting - permission is given by p bitwise and not u, where p is the permission - and u is the umask. For example, if p is 0777 and u is 0057, then the - resulting permission is 0720. The default permission is 0777 for a - directory and 0666 for a file. The default umask is 0027. The umask - must be specified in 4-digit octal notation (e.g. 0766). - :type umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations.
- :type timeout: int - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if resource is not None: - query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - 
response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - async def update(self, action, body, mode=None, max_records=None, continuation=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously - uploaded data to a file, sets properties for a file or directory, or - sets access control for a file or directory. Data can only be appended - to a file. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param action: The action must be "append" to upload data to be - appended to a file, "flush" to flush previously uploaded data to a - file, "setProperties" to set the properties of a file or directory, - "setAccessControl" to set the owner, group, permissions, or access - control list for a file or directory, or "setAccessControlRecursive" - to set the access control list for a directory recursively. Note that - Hierarchical Namespace must be enabled for the account in order to use - access control. Also note that the Access Control List (ACL) includes - permissions for the owner, owning group, and others, so the - x-ms-permissions and x-ms-acl request headers are mutually exclusive. - Possible values include: 'append', 'flush', 'setProperties', - 'setAccessControl', 'setAccessControlRecursive' - :type action: str or - ~azure.storage.filedatalake.models.PathUpdateAction - :param body: Initial data - :type body: Generator - :param mode: Optional. Valid and Required for - "SetAccessControlRecursive" operation. Mode "set" sets POSIX access - control rights on files and directories, "modify" modifies one or more - POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were - present earlier on files and directories. Possible values include: - 'set', 'modify', 'remove' - :type mode: str or - ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param max_records: Optional. Valid for "SetAccessControlRecursive" - operation. It specifies the maximum number of files or directories on - which the acl change will be applied. If omitted or greater than - 2,000, the request will process up to 2,000 items - :type max_records: int - :param continuation: Optional. 
The number of paths processed with each - invocation is limited. If the number of paths to be processed exceeds - this limit, a continuation token is returned in the response header - x-ms-continuation. When a continuation token is returned in the - response, it must be percent-encoded and specified in a subsequent - invocation of setAccessControlRecursive operation. - :type continuation: str - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. - It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false", a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed. - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. 
- :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: SetAccessControlRecursiveResponse or the result of - cls(response) - :rtype: - ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - content_md5 = None - if path_http_headers is not None: - content_md5 = path_http_headers.content_md5 - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.update.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - 
query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/octet-stream' - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - 
header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} - - async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Lease Path. - - Create and manage a lease to restrict write and delete access to the - path. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param x_ms_lease_action: There are five lease actions: "acquire", - "break", "change", "renew", and "release". Use "acquire" and specify - the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a - new lease. Use "break" to break an existing lease. When a lease is - broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the - file. When a lease is successfully broken, the response indicates the - interval in seconds until a new lease can be acquired. 
Use "change" - and specify the current lease ID in "x-ms-lease-id" and the new lease - ID in "x-ms-proposed-lease-id" to change the lease ID of an active - lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to - release a lease. Possible values include: 'acquire', 'break', - 'change', 'renew', 'release' - :type x_ms_lease_action: str or - ~azure.storage.filedatalake.models.PathLeaseAction - :param x_ms_lease_duration: The lease duration is required to acquire - a lease, and specifies the duration of the lease in seconds. The - lease duration must be between 15 and 60 seconds or -1 for infinite - lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is - optional to break a lease, and specifies the break period of the - lease in seconds. The lease break duration must be between 0 and 60 - seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", 
x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - lease.metadata = {'url': '/{filesystem}/{path}'} - - async def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Read File. - - Read the contents of a file. For read operations, range requests are - supported. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param range: The HTTP Range request header specifies one or more byte - ranges of the resource to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set - to "true" and specified together with the Range header, the service - returns the MD5 hash for the range, as long as the range is less than - or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). 
If this - header is set to true when the range exceeds 4 MB in size, the service - returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.read.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, 
query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), - 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), - 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} - - async def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a - path. Get Status returns all system defined properties for a path. Get - Access Control List returns the access control list for a path. This - operation supports conditional HTTP requests. For more information, - see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param action: Optional. If the value is "getStatus" only the system - defined properties for the path are returned. If the value is - "getAccessControl" the access control list is returned in the response - headers (Hierarchical Namespace must be enabled for the account), - otherwise the properties are returned. Possible values include: - 'getAccessControl', 'getStatus' - :type action: str or - ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{filesystem}/{path}'} - - async def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP - requests. For more information, see [Specifying Conditional Headers - for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param recursive: Required - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - async def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def set_access_control_recursive(self, mode, timeout=None, continuation=None, max_records=None, acl=None, request_id=None, *, cls=None, **kwargs): - """Set the access control list for a path and subpaths. - - :param mode: Mode "set" sets POSIX access control rights on files and - directories, "modify" modifies one or more POSIX access control rights - that pre-exist on files and directories, "remove" removes one or more - POSIX access control rights that were present earlier on files and - directories. Possible values include: 'set', 'modify', 'remove' - :type mode: str or - ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param continuation: Optional. The number of paths processed with each - invocation is limited. If the number of paths to be processed exceeds - this limit, a continuation token is returned in the response header - x-ms-continuation. When a continuation token is returned in the - response, it must be percent-encoded and specified in a subsequent - invocation of the setAccessControlRecursive operation. - :type continuation: str - :param max_records: Optional. It specifies the maximum number of files - or directories on which the acl change will be applied. If omitted or - greater than 2,000, the request will process up to 2,000 items - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: SetAccessControlRecursiveResponse or the result of - cls(response) - :rtype: - ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - action = "setAccessControlRecursive" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} - - async def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Flush previously uploaded data to a file. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. - It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false", a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed. - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - content_md5 = None - if path_http_headers is not None: - content_md5 = path_http_headers.content_md5 - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "flush" - - # Construct URL - url = self.flush_data.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - if 
cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - return cls(response, None, response_headers) - flush_data.metadata = {'url': '/{filesystem}/{path}'} - - async def append_data(self, body, position=None, timeout=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Append data to the file. - - :param body: Initial data - :type body: Generator - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. - It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. 
- :type position: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - transactional_content_hash = None - if path_http_headers is not None: - transactional_content_hash = path_http_headers.transactional_content_hash - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - action = "append" - - # Construct URL - url = self.append_data.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-client-request-id': 
self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - return cls(response, None, response_headers) - append_data.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py deleted file mode 100644 index b4cb9c5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py +++ /dev/null @@ -1,128 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar resource: The value must be "account" for all account operations. Constant value: "account". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "account" - - async def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, *, cls=None, **kwargs): - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified - prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum - number of items to return. If omitted or greater than 5,000, the - response will include up to 5,000 items. - :type max_results: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: FileSystemList or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.FileSystemList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.list_file_systems.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FileSystemList', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_file_systems.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/__init__.py deleted file mode 100644 index 4a3401a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/__init__.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
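
The deleted async operations above (``PathOperations.flush_data`` / ``append_data`` and ``ServiceOperations.list_file_systems``) are the generated layer behind the public ``azure-storage-file-datalake`` client that this package vendors. As a purely illustrative sketch of how that layer is normally driven, assuming the standalone public client and placeholder account details::

    from azure.storage.filedatalake import DataLakeServiceClient

    # Placeholders only -- substitute a real account URL and credential.
    service = DataLakeServiceClient(
        account_url="https://<account>.dfs.core.windows.net",
        credential="<account-key-or-token-credential>",
    )

    # ServiceOperations.list_file_systems: GET on '/' with resource=account.
    for fs in service.list_file_systems(name_starts_with="logs"):
        print(fs.name)

    # PathOperations.append_data / flush_data: PATCH '/{filesystem}/{path}' with
    # action=append, then action=flush at the final cumulative offset. Appended
    # data is not readable until it has been flushed.
    file_client = service.get_file_system_client("logs").get_file_client("run/output.txt")
    file_client.create_file()
    chunk = b"hello, data lake\n"
    file_client.append_data(chunk, offset=0, length=len(chunk))
    file_client.flush_data(len(chunk))
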
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AclFailedEntry - from ._models_py3 import FileSystem - from ._models_py3 import FileSystemList - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import Path - from ._models_py3 import PathHTTPHeaders - from ._models_py3 import PathList - from ._models_py3 import SetAccessControlRecursiveResponse - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError, StorageErrorException - from ._models_py3 import StorageErrorError -except (SyntaxError, ImportError): - from ._models import AclFailedEntry - from ._models import FileSystem - from ._models import FileSystemList - from ._models import LeaseAccessConditions - from ._models import ModifiedAccessConditions - from ._models import Path - from ._models import PathHTTPHeaders - from ._models import PathList - from ._models import SetAccessControlRecursiveResponse - from ._models import SourceModifiedAccessConditions - from ._models import StorageError, StorageErrorException - from ._models import StorageErrorError -from ._data_lake_storage_client_enums import ( - PathGetPropertiesAction, - PathLeaseAction, - PathRenameMode, - PathResourceType, - PathSetAccessControlRecursiveMode, - PathUpdateAction, -) - -__all__ = [ - 'AclFailedEntry', - 'FileSystem', - 'FileSystemList', - 'LeaseAccessConditions', - 'ModifiedAccessConditions', - 'Path', - 'PathHTTPHeaders', - 'PathList', - 'SetAccessControlRecursiveResponse', - 'SourceModifiedAccessConditions', - 'StorageError', 'StorageErrorException', - 'StorageErrorError', - 'PathResourceType', - 'PathRenameMode', - 'PathUpdateAction', - 'PathSetAccessControlRecursiveMode', - 'PathLeaseAction', - 'PathGetPropertiesAction', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_data_lake_storage_client_enums.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_data_lake_storage_client_enums.py deleted file mode 100644 index 35a1a57..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_data_lake_storage_client_enums.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from enum import Enum - - -class PathResourceType(str, Enum): - - directory = "directory" - file = "file" - - -class PathRenameMode(str, Enum): - - legacy = "legacy" - posix = "posix" - - -class PathUpdateAction(str, Enum): - - append = "append" - flush = "flush" - set_properties = "setProperties" - set_access_control = "setAccessControl" - set_access_control_recursive = "setAccessControlRecursive" - - -class PathSetAccessControlRecursiveMode(str, Enum): - - set = "set" - modify = "modify" - remove = "remove" - - -class PathLeaseAction(str, Enum): - - acquire = "acquire" - break_enum = "break" - change = "change" - renew = "renew" - release = "release" - - -class PathGetPropertiesAction(str, Enum): - - get_access_control = "getAccessControl" - get_status = "getStatus" diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models.py deleted file mode 100644 index 2f44279..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models.py +++ /dev/null @@ -1,350 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AclFailedEntry(Model): - """AclFailedEntry. - - :param name: - :type name: str - :param type: - :type type: str - :param error_message: - :type error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(AclFailedEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) - self.error_message = kwargs.get('error_message', None) - - -class FileSystem(Model): - """FileSystem. - - :param name: - :type name: str - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(FileSystem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - - -class FileSystemList(Model): - """FileSystemList. - - :param filesystems: - :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__(self, **kwargs): - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = kwargs.get('filesystems', None) - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. 
- :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param if_modified_since: Specify this header value to operate only on a - blob if it has been modified since the specified date/time. - :type if_modified_since: datetime - :param if_unmodified_since: Specify this header value to operate only on a - blob if it has not been modified since the specified date/time. - :type if_unmodified_since: datetime - :param if_match: Specify an ETag value to operate only on blobs with a - matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs - without a matching value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - - -class Path(Model): - """Path. - - :param name: - :type name: str - :param is_directory: Default value: False . - :type is_directory: bool - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - :param content_length: - :type content_length: long - :param owner: - :type owner: str - :param group: - :type group: str - :param permissions: - :type permissions: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(Path, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.is_directory = kwargs.get('is_directory', False) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - self.content_length = kwargs.get('content_length', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - - -class PathHTTPHeaders(Model): - """Additional parameters for a set of operations, such as: Path_create, - Path_update, Path_flush_data, Path_append_data. - - :param cache_control: Optional. Sets the blob's cache control. If - specified, this property is stored with the blob and returned with a read - request. - :type cache_control: str - :param content_encoding: Optional. Sets the blob's content encoding. If - specified, this property is stored with the blob and returned with a read - request. - :type content_encoding: str - :param content_language: Optional. Set the blob's content language. If - specified, this property is stored with the blob and returned with a read - request. 
- :type content_language: str - :param content_disposition: Optional. Sets the blob's Content-Disposition - header. - :type content_disposition: str - :param content_type: Optional. Sets the blob's content type. If specified, - this property is stored with the blob and returned with a read request. - :type content_type: str - :param content_md5: Specify the transactional md5 for the body, to be - validated by the service. - :type content_md5: bytearray - :param transactional_content_hash: Specify the transactional md5 for the - body, to be validated by the service. - :type transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': '', 'type': 'str'}, - 'content_encoding': {'key': '', 'type': 'str'}, - 'content_language': {'key': '', 'type': 'str'}, - 'content_disposition': {'key': '', 'type': 'str'}, - 'content_type': {'key': '', 'type': 'str'}, - 'content_md5': {'key': '', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': '', 'type': 'bytearray'}, - } - - def __init__(self, **kwargs): - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.content_type = kwargs.get('content_type', None) - self.content_md5 = kwargs.get('content_md5', None) - self.transactional_content_hash = kwargs.get('transactional_content_hash', None) - - -class PathList(Model): - """PathList. - - :param paths: - :type paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__(self, **kwargs): - super(PathList, self).__init__(**kwargs) - self.paths = kwargs.get('paths', None) - - -class SetAccessControlRecursiveResponse(Model): - """SetAccessControlRecursiveResponse. - - :param directories_successful: - :type directories_successful: int - :param files_successful: - :type files_successful: int - :param failure_count: - :type failure_count: int - :param failed_entries: - :type failed_entries: - list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__(self, **kwargs): - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = kwargs.get('directories_successful', None) - self.files_successful = kwargs.get('files_successful', None) - self.failure_count = kwargs.get('failure_count', None) - self.failed_entries = kwargs.get('failed_entries', None) - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for create operation. - - :param source_if_match: Specify an ETag value to operate only on blobs - with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on - blobs without a matching value. - :type source_if_none_match: str - :param source_if_modified_since: Specify this header value to operate only - on a blob if it has been modified since the specified date/time. 
- :type source_if_modified_since: datetime - :param source_if_unmodified_since: Specify this header value to operate - only on a blob if it has not been modified since the specified date/time. - :type source_if_unmodified_since: datetime - """ - - _attribute_map = { - 'source_if_match': {'key': '', 'type': 'str'}, - 'source_if_none_match': {'key': '', 'type': 'str'}, - 'source_if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - - -class StorageError(Model): - """StorageError. - - :param error: The service error response object. - :type error: ~azure.storage.filedatalake.models.StorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorError'}, - } - - def __init__(self, **kwargs): - super(StorageError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageErrorError(Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(StorageErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models_py3.py deleted file mode 100644 index 3ca8d84..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/models/_models_py3.py +++ /dev/null @@ -1,350 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AclFailedEntry(Model): - """AclFailedEntry. 
- - :param name: - :type name: str - :param type: - :type type: str - :param error_message: - :type error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__(self, *, name: str=None, type: str=None, error_message: str=None, **kwargs) -> None: - super(AclFailedEntry, self).__init__(**kwargs) - self.name = name - self.type = type - self.error_message = error_message - - -class FileSystem(Model): - """FileSystem. - - :param name: - :type name: str - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__(self, *, name: str=None, last_modified: str=None, e_tag: str=None, **kwargs) -> None: - super(FileSystem, self).__init__(**kwargs) - self.name = name - self.last_modified = last_modified - self.e_tag = e_tag - - -class FileSystemList(Model): - """FileSystemList. - - :param filesystems: - :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__(self, *, filesystems=None, **kwargs) -> None: - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = filesystems - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str'}, - } - - def __init__(self, *, lease_id: str=None, **kwargs) -> None: - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ModifiedAccessConditions(Model): - """Additional parameters for a set of operations. - - :param if_modified_since: Specify this header value to operate only on a - blob if it has been modified since the specified date/time. - :type if_modified_since: datetime - :param if_unmodified_since: Specify this header value to operate only on a - blob if it has not been modified since the specified date/time. - :type if_unmodified_since: datetime - :param if_match: Specify an ETag value to operate only on blobs with a - matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs - without a matching value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - } - - def __init__(self, *, if_modified_since=None, if_unmodified_since=None, if_match: str=None, if_none_match: str=None, **kwargs) -> None: - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - - -class Path(Model): - """Path. - - :param name: - :type name: str - :param is_directory: Default value: False . 
- :type is_directory: bool - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - :param content_length: - :type content_length: long - :param owner: - :type owner: str - :param group: - :type group: str - :param permissions: - :type permissions: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - } - - def __init__(self, *, name: str=None, is_directory: bool=False, last_modified: str=None, e_tag: str=None, content_length: int=None, owner: str=None, group: str=None, permissions: str=None, **kwargs) -> None: - super(Path, self).__init__(**kwargs) - self.name = name - self.is_directory = is_directory - self.last_modified = last_modified - self.e_tag = e_tag - self.content_length = content_length - self.owner = owner - self.group = group - self.permissions = permissions - - -class PathHTTPHeaders(Model): - """Additional parameters for a set of operations, such as: Path_create, - Path_update, Path_flush_data, Path_append_data. - - :param cache_control: Optional. Sets the blob's cache control. If - specified, this property is stored with the blob and returned with a read - request. - :type cache_control: str - :param content_encoding: Optional. Sets the blob's content encoding. If - specified, this property is stored with the blob and returned with a read - request. - :type content_encoding: str - :param content_language: Optional. Set the blob's content language. If - specified, this property is stored with the blob and returned with a read - request. - :type content_language: str - :param content_disposition: Optional. Sets the blob's Content-Disposition - header. - :type content_disposition: str - :param content_type: Optional. Sets the blob's content type. If specified, - this property is stored with the blob and returned with a read request. - :type content_type: str - :param content_md5: Specify the transactional md5 for the body, to be - validated by the service. - :type content_md5: bytearray - :param transactional_content_hash: Specify the transactional md5 for the - body, to be validated by the service. 
- :type transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': '', 'type': 'str'}, - 'content_encoding': {'key': '', 'type': 'str'}, - 'content_language': {'key': '', 'type': 'str'}, - 'content_disposition': {'key': '', 'type': 'str'}, - 'content_type': {'key': '', 'type': 'str'}, - 'content_md5': {'key': '', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': '', 'type': 'bytearray'}, - } - - def __init__(self, *, cache_control: str=None, content_encoding: str=None, content_language: str=None, content_disposition: str=None, content_type: str=None, content_md5: bytearray=None, transactional_content_hash: bytearray=None, **kwargs) -> None: - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.content_type = content_type - self.content_md5 = content_md5 - self.transactional_content_hash = transactional_content_hash - - -class PathList(Model): - """PathList. - - :param paths: - :type paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__(self, *, paths=None, **kwargs) -> None: - super(PathList, self).__init__(**kwargs) - self.paths = paths - - -class SetAccessControlRecursiveResponse(Model): - """SetAccessControlRecursiveResponse. - - :param directories_successful: - :type directories_successful: int - :param files_successful: - :type files_successful: int - :param failure_count: - :type failure_count: int - :param failed_entries: - :type failed_entries: - list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__(self, *, directories_successful: int=None, files_successful: int=None, failure_count: int=None, failed_entries=None, **kwargs) -> None: - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - self.failed_entries = failed_entries - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for create operation. - - :param source_if_match: Specify an ETag value to operate only on blobs - with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on - blobs without a matching value. - :type source_if_none_match: str - :param source_if_modified_since: Specify this header value to operate only - on a blob if it has been modified since the specified date/time. - :type source_if_modified_since: datetime - :param source_if_unmodified_since: Specify this header value to operate - only on a blob if it has not been modified since the specified date/time. 
- :type source_if_unmodified_since: datetime - """ - - _attribute_map = { - 'source_if_match': {'key': '', 'type': 'str'}, - 'source_if_none_match': {'key': '', 'type': 'str'}, - 'source_if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, source_if_match: str=None, source_if_none_match: str=None, source_if_modified_since=None, source_if_unmodified_since=None, **kwargs) -> None: - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - - -class StorageError(Model): - """StorageError. - - :param error: The service error response object. - :type error: ~azure.storage.filedatalake.models.StorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorError'}, - } - - def __init__(self, *, error=None, **kwargs) -> None: - super(StorageError, self).__init__(**kwargs) - self.error = error - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageErrorError(Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None: - super(StorageErrorError, self).__init__(**kwargs) - self.code = code - self.message = message diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/__init__.py deleted file mode 100644 index 9efa6df..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
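
The request-model classes deleted above (``PathHTTPHeaders``, ``LeaseAccessConditions``, ``ModifiedAccessConditions``) are rarely constructed by hand; the public client assembles them from keyword arguments. A hedged sketch, assuming the current ``azure-storage-file-datalake`` surface and placeholder names::

    from azure.core import MatchConditions
    from azure.storage.filedatalake import ContentSettings, DataLakeFileClient

    file_client = DataLakeFileClient(
        account_url="https://<account>.dfs.core.windows.net",
        file_system_name="logs",
        file_path="run/output.txt",
        credential="<account-key-or-token-credential>",
    )

    props = file_client.get_file_properties()
    data = b"appended under an ETag precondition\n"
    file_client.append_data(data, offset=props.size, length=len(data))

    # content_settings populates PathHTTPHeaders (x-ms-content-type, ...);
    # etag + match_condition populate ModifiedAccessConditions (If-Match).
    file_client.flush_data(
        props.size + len(data),
        content_settings=ContentSettings(content_type="text/plain"),
        etag=props.etag,
        match_condition=MatchConditions.IfNotModified,
    )
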
-# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_file_system_operations.py deleted file mode 100644 index b0d17ff..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_file_system_operations.py +++ /dev/null @@ -1,462 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class FileSystemOperations(object): - """FileSystemOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - def create(self, properties=None, request_id=None, timeout=None, cls=None, **kwargs): - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem - already exists, the operation fails. This operation does not support - conditional HTTP requests. - - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}'} - - def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, cls=None, **kwargs): - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional - HTTP requests. For more information, see [Specifying Conditional - Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. 
- :type properties: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{filesystem}'} - - def get_properties(self, request_id=None, timeout=None, cls=None, **kwargs): - """Get FileSystem Properties. 
- - All system and user-defined filesystem properties are specified in the - response headers. - - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{filesystem}'} - - def delete(self, request_id=None, timeout=None, modified_access_conditions=None, cls=None, **kwargs): - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a - FileSystem with the same identifier cannot be created for at least 30 - seconds. While the filesystem is being deleted, attempts to create a - filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information - indicating that the filesystem is being deleted. All other operations, - including operations on any files or directories within the filesystem, - will fail with status code 404 (Not Found) while the filesystem is - being deleted. This operation supports conditional HTTP requests. 
For - more information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}'} - - def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, cls=None, **kwargs): - """List Paths. - - List FileSystem paths and their properties. 
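
The ``FileSystemOperations`` methods deleted above (``create``, ``set_properties``, ``get_properties``, ``delete``) all target ``/{filesystem}``. A minimal lifecycle sketch at the public-client level, assuming a placeholder connection string::

    from azure.storage.filedatalake import DataLakeServiceClient

    service = DataLakeServiceClient.from_connection_string("<connection-string>")

    fs_client = service.create_file_system("scratch")   # PUT    /{filesystem}
    props = fs_client.get_file_system_properties()       # HEAD   /{filesystem}
    print(props.last_modified, props.etag)
    fs_client.delete_file_system()                        # DELETE /{filesystem}
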
- - :param recursive: Required - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param path: Optional. Filters results to paths within the specified - directory. An error occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum - number of items to return. If omitted or greater than 5,000, the - response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: PathList or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.PathList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.list_paths.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PathList', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_paths.metadata = {'url': '/{filesystem}'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_path_operations.py deleted file mode 100644 index 58e7d7e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_path_operations.py +++ /dev/null @@ -1,1599 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class PathOperations(object): - """PathOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, cls=None, **kwargs): - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is - overwritten and if the destination already exists and has a lease the - lease is broken. This operation supports conditional HTTP requests. 
- For more information, see [Specifying Conditional Headers for Blob - Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param resource: Required only for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource: str or - ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This - parameter determines the behavior of the rename operation. The value - must be "legacy" or "posix", and the default value will be "posix". - Possible values include: 'legacy', 'posix' - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. - The value must have the following format: "/{filesystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. This value must be a URL percent-encoded string. Note that - the string may only contain ASCII characters in the ISO-8859-1 - character set. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must - match. - :type source_lease_id: str - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is - enabled for the account. When creating a file or directory and the - parent folder does not have a default ACL, the umask restricts the - permissions of the file or directory to be created. The resulting - permission is given by p bitwise and not u, where p is the permission - and u is the umask. For example, if p is 0777 and u is 0057, then the - resulting permission is 0720. The default permission is 0777 for a - directory and 0666 for a file.
The default umask is 0027. The umask - must be specified in 4-digit octal notation (e.g. 0766). - :type umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - 
query_parameters = {} - if resource is not None: - query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - - # 
Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - def update(self, action, body, mode=None, max_records=None, continuation=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously - uploaded data to a file, sets properties for a file or directory, or - sets access control for a file or directory. Data can only be appended - to a file. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param action: The action must be "append" to upload data to be - appended to a file, "flush" to flush previously uploaded data to a - file, "setProperties" to set the properties of a file or directory, - "setAccessControl" to set the owner, group, permissions, or access - control list for a file or directory, or "setAccessControlRecursive" - to set the access control list for a directory recursively. Note that - Hierarchical Namespace must be enabled for the account in order to use - access control. Also note that the Access Control List (ACL) includes - permissions for the owner, owning group, and others, so the - x-ms-permissions and x-ms-acl request headers are mutually exclusive. - Possible values include: 'append', 'flush', 'setProperties', - 'setAccessControl', 'setAccessControlRecursive' - :type action: str or - ~azure.storage.filedatalake.models.PathUpdateAction - :param body: Initial data - :type body: Generator - :param mode: Optional. Valid and Required for - "SetAccessControlRecursive" operation. Mode "set" sets POSIX access - control rights on files and directories, "modify" modifies one or more - POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were - present earlier on files and directories. 
Possible values include: - 'set', 'modify', 'remove' - :type mode: str or - ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param max_records: Optional. Valid for "SetAccessControlRecursive" - operation. It specifies the maximum number of files or directories on - which the acl change will be applied. If omitted or greater than - 2,000, the request will process up to 2,000 items. - :type max_records: int - :param continuation: Optional. The number of paths processed with each - invocation is limited. If the number of paths to be processed exceeds - this limit, a continuation token is returned in the response header - x-ms-continuation. When a continuation token is returned in the - response, it must be percent-encoded and specified in a subsequent - invocation of the setAccessControlRecursive operation. - :type continuation: str - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. - It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false", a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed. - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed.
All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: SetAccessControlRecursiveResponse or the result of - cls(response) - :rtype: - ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - content_md5 = None - if path_http_headers is not None: - content_md5 = path_http_headers.content_md5 - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if 
modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.update.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/octet-stream' - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = 
self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} - - def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Lease Path. - - Create and manage a lease to restrict write and delete access to the - path. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param x_ms_lease_action: There are five lease actions: "acquire", - "break", "change", "renew", and "release". Use "acquire" and specify - the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a - new lease. 
Use "break" to break an existing lease. When a lease is - broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the - file. When a lease is successfully broken, the response indicates the - interval in seconds until a new lease can be acquired. Use "change" - and specify the current lease ID in "x-ms-lease-id" and the new lease - ID in "x-ms-proposed-lease-id" to change the lease ID of an active - lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to - release a lease. Possible values include: 'acquire', 'break', - 'change', 'renew', 'release' - :type x_ms_lease_action: str or - ~azure.storage.filedatalake.models.PathLeaseAction - :param x_ms_lease_duration: The lease duration is required to acquire - a lease, and specifies the duration of the lease in seconds. The - lease duration must be between 15 and 60 seconds or -1 for infinite - lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is - optional to break a lease, and specifies the break period of the - lease in seconds. The lease break duration must be between 0 and 60 - seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - lease.metadata = {'url': '/{filesystem}/{path}'} - - def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Read File. - - Read the contents of a file. For read operations, range requests are - supported. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param range: The HTTP Range request header specifies one or more byte - ranges of the resource to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set - to "true" and specified together with the Range header, the service - returns the MD5 hash for the range, as long as the range is less than - or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this - header is set to true when the range exceeds 4 MB in size, the service - returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.read.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - 
header_dict = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), - 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize('str', 
response.headers.get('x-ms-lease-status')), - 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} - - def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a - path. Get Status returns all system defined properties for a path. Get - Access Control List returns the access control list for a path. This - operation supports conditional HTTP requests. For more information, - see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param action: Optional. If the value is "getStatus" only the system - defined properties for the path are returned. If the value is - "getAccessControl" the access control list is returned in the response - headers (Hierarchical Namespace must be enabled for the account), - otherwise the properties are returned. Possible values include: - 'getAccessControl', 'getStatus' - :type action: str or - ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{filesystem}/{path}'} - - def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP - requests. For more information, see [Specifying Conditional Headers - for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param recursive: Required - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
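The continuation contract described above (and shared by ``setAccessControlRecursive`` and listing) boils down to re-invoking the operation with the returned ``x-ms-continuation`` value until none comes back. A self-contained sketch of that loop, with ``fake_call`` standing in for a service operation that can only handle two paths per invocation::

    PENDING = ['dir/a', 'dir/b', 'dir/c', 'dir/d', 'dir/e']

    def fake_call(continuation=None, max_records=2):
        # Returns (paths handled in this call, next continuation token or None),
        # mimicking the x-ms-continuation response header.
        start = int(continuation) if continuation else 0
        batch = PENDING[start:start + max_records]
        done = start + len(batch) >= len(PENDING)
        return batch, None if done else str(start + len(batch))

    token = None
    processed = []
    while True:
        batch, token = fake_call(continuation=token)
        processed.extend(batch)
        if not token:
            break
    # processed now holds all five paths, handled across three invocations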
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) 
- raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - def set_access_control_recursive(self, mode, timeout=None, continuation=None, max_records=None, acl=None, request_id=None, cls=None, **kwargs): - """Set the access control list for a path and subpaths. - - :param mode: Mode "set" sets POSIX access control rights on files and - directories, "modify" modifies one or more POSIX access control rights - that pre-exist on files and directories, "remove" removes one or more - POSIX access control rights that were present earlier on files and - directories. Possible values include: 'set', 'modify', 'remove' - :type mode: str or - ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param max_records: Optional. It specifies the maximum number of files - or directories on which the acl change will be applied. If omitted or - greater than 2,000, the request will process up to 2,000 items - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
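The ``x-ms-acl`` value described above is plain text, so it can be assembled without the SDK. A small, self-contained sketch of the documented ``[scope:][type]:[id]:[permissions]`` format; the object ID below is a made-up placeholder::

    def build_acl(entries):
        # entries: iterable of (scope, ace_type, object_id, permissions) tuples
        parts = []
        for scope, ace_type, object_id, permissions in entries:
            prefix = scope + ':' if scope else ''
            parts.append('{}{}:{}:{}'.format(prefix, ace_type, object_id, permissions))
        return ','.join(parts)

    acl = build_acl([
        ('', 'user', '', 'rwx'),       # owning user
        ('', 'group', '', 'r-x'),      # owning group
        ('', 'user', '11111111-2222-3333-4444-555555555555', 'r--'),  # named user
        ('default', 'other', '', '---'),  # default ACE inherited by new children
    ])
    # acl == 'user::rwx,group::r-x,user:11111111-2222-3333-4444-555555555555:r--,default:other::---'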
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: SetAccessControlRecursiveResponse or the result of - cls(response) - :rtype: - ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - action = "setAccessControlRecursive" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} - - def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. 
- It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - content_md5 = None - if path_http_headers is not None: - content_md5 = path_http_headers.content_md5 - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "flush" - - # Construct URL - url = self.flush_data.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - if 
cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - return cls(response, None, response_headers) - flush_data.metadata = {'url': '/{filesystem}/{path}'} - - def append_data(self, body, position=None, timeout=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): - """Append data to the file. - - :param body: Initial data - :type body: Generator - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. - It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. 
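The ``position``/``content_length`` contract spelled out for the flush and append operations can be pictured with a purely local stand-in for the service-side buffer: each append names the offset at which its bytes start, and nothing becomes readable until a body-less flush at the final length. A minimal sketch of that bookkeeping only, not of the SDK itself::

    uncommitted = {}   # position -> bytes, as delivered by append_data
    committed = b''    # what a reader of the file would observe

    def append_data(body, position, content_length):
        data = b''.join(body)
        assert len(data) == content_length      # Content-Length must match the body
        uncommitted[position] = data

    def flush_data(position, content_length=0):
        global committed
        assert content_length == 0               # flush carries no request body
        ordered = b''.join(uncommitted[p] for p in sorted(uncommitted))
        assert len(ordered) == position          # position == file length after all writes
        committed = ordered
        uncommitted.clear()

    offset = 0
    for chunk in (b'hello ', b'data lake'):
        append_data(iter([chunk]), position=offset, content_length=len(chunk))
        offset += len(chunk)

    flush_data(position=offset)                  # committed == b'hello data lake'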
For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - transactional_content_hash = None - if path_http_headers is not None: - transactional_content_hash = path_http_headers.transactional_content_hash - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - action = "append" - - # Construct URL - url = self.append_data.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-version': 
self._deserialize('str', response.headers.get('x-ms-version')), - } - return cls(response, None, response_headers) - append_data.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_service_operations.py deleted file mode 100644 index 540079a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/operations/_service_operations.py +++ /dev/null @@ -1,128 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar resource: The value must be "account" for all account operations. Constant value: "account". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "account" - - def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, cls=None, **kwargs): - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified - prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum - number of items to return. If omitted or greater than 5,000, the - response will include up to 5,000 items. - :type max_results: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: FileSystemList or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.FileSystemList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.list_file_systems.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FileSystemList', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_file_systems.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/version.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/version.py deleted file mode 100644 index be04589..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/version.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -VERSION = "2019-12-12" - diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_models.py deleted file mode 100644 index b208508..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_models.py +++ /dev/null @@ -1,648 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from enum import Enum - -from azure.core.paging import PageIterator -from azure.multiapi.storagev2.blob.v2019_12_12 import LeaseProperties as BlobLeaseProperties -from azure.multiapi.storagev2.blob.v2019_12_12 import AccountSasPermissions as BlobAccountSasPermissions -from azure.multiapi.storagev2.blob.v2019_12_12 import ResourceTypes as BlobResourceTypes -from azure.multiapi.storagev2.blob.v2019_12_12 import UserDelegationKey as BlobUserDelegationKey -from azure.multiapi.storagev2.blob.v2019_12_12 import ContentSettings as BlobContentSettings -from azure.multiapi.storagev2.blob.v2019_12_12 import ContainerSasPermissions, BlobSasPermissions -from azure.multiapi.storagev2.blob.v2019_12_12 import AccessPolicy as BlobAccessPolicy -from azure.multiapi.storagev2.blob.v2019_12_12 import DelimitedTextDialect as BlobDelimitedTextDialect -from azure.multiapi.storagev2.blob.v2019_12_12 import DelimitedJsonDialect as BlobDelimitedJSON -from azure.multiapi.storagev2.blob.v2019_12_12._generated.models import StorageErrorException -from azure.multiapi.storagev2.blob.v2019_12_12._models import ContainerPropertiesPaged -from ._deserialize import return_headers_and_deserialized_path_list -from ._generated.models import Path -from ._shared.models import DictMixin -from ._shared.response_handlers import process_storage_error - - -class FileSystemProperties(object): - """File System properties class. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file system was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file system. - :ivar str public_access: Specifies whether data in the file system may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the file system has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the file system has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - file system as metadata. - - Returned ``FileSystemProperties`` instances expose these values through a - dictionary interface, for example: ``file_system_props["last_modified"]``. - Additionally, the file system name is available as ``file_system_props["name"]``. 
- """ - def __init__(self): - self.name = None - self.last_modified = None - self.etag = None - self.lease = None - self.public_access = None - self.has_immutability_policy = None - self.has_legal_hold = None - self.metadata = None - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - generated.properties.public_access) - props.has_immutability_policy = generated.properties.has_immutability_policy - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - return props - - @classmethod - def _convert_from_container_props(cls, container_properties): - container_properties.__class__ = cls - container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - container_properties.public_access) - container_properties.lease.__class__ = LeaseProperties - return container_properties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access - - -class DirectoryProperties(DictMixin): - """ - :ivar str name: name of the directory - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool deleted: if the current directory marked as deleted - :ivar dict metadata: Name-value pairs associated with the directory as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the directory was created, in UTC. - :ivar int remaining_retention_days: The number of days that the directory will be retained - before being permanently deleted by the service. 
- :var ~azure.storage.filedatalake.ContentSettings content_settings: - """ - def __init__(self, **kwargs): - super(DirectoryProperties, self).__init__( - **kwargs - ) - self.name = None - self.etag = None - self.deleted = None - self.metadata = None - self.lease = None - self.last_modified = None - self.creation_time = None - self.deleted_time = None - self.remaining_retention_days = None - - @classmethod - def _from_blob_properties(cls, blob_properties): - directory_props = DirectoryProperties() - directory_props.name = blob_properties.name - directory_props.etag = blob_properties.etag - directory_props.deleted = blob_properties.deleted - directory_props.metadata = blob_properties.metadata - directory_props.lease = blob_properties.lease - directory_props.lease.__class__ = LeaseProperties - directory_props.last_modified = blob_properties.last_modified - directory_props.creation_time = blob_properties.creation_time - directory_props.deleted_time = blob_properties.deleted_time - directory_props.remaining_retention_days = blob_properties.remaining_retention_days - return directory_props - - -class FileProperties(DictMixin): - """ - :ivar str name: name of the file - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool deleted: if the current file marked as deleted - :ivar dict metadata: Name-value pairs associated with the file as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the file was created, in UTC. - :ivar int size: size of the file - :ivar int remaining_retention_days: The number of days that the file will be retained - before being permanently deleted by the service. - :var ~azure.storage.filedatalake.ContentSettings content_settings: - """ - def __init__(self, **kwargs): - super(FileProperties, self).__init__( - **kwargs - ) - self.name = None - self.etag = None - self.deleted = None - self.metadata = None - self.lease = None - self.last_modified = None - self.creation_time = None - self.size = None - self.deleted_time = None - self.remaining_retention_days = None - self.content_settings = None - - @classmethod - def _from_blob_properties(cls, blob_properties): - file_props = FileProperties() - file_props.name = blob_properties.name - file_props.etag = blob_properties.etag - file_props.deleted = blob_properties.deleted - file_props.metadata = blob_properties.metadata - file_props.lease = blob_properties.lease - file_props.lease.__class__ = LeaseProperties - file_props.last_modified = blob_properties.last_modified - file_props.creation_time = blob_properties.creation_time - file_props.size = blob_properties.size - file_props.deleted_time = blob_properties.deleted_time - file_props.remaining_retention_days = blob_properties.remaining_retention_days - file_props.content_settings = blob_properties.content_settings - return file_props - - -class PathProperties(object): - """Path properties listed by get_paths API. - - :ivar str name: the full path for a file or directory. - :ivar str owner: The owner of the file or directory. - :ivar str group: The owning group of the file or directory. - :ivar str permissions: Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported.
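``DictMixin`` comes from ``_shared.models`` and is not shown here; it is assumed to expose instance attributes through a mapping interface, which is what lets these property objects be read like dictionaries. A rough stand-in illustrating that assumption::

    class DictMixin(object):
        # Rough approximation of the shared helper: attributes double as keys.
        def __getitem__(self, key):
            return self.__dict__[key]
        def get(self, key, default=None):
            return self.__dict__.get(key, default)
        def keys(self):
            return [k for k in self.__dict__ if not k.startswith('_')]

    class FileProps(DictMixin):          # simplified stand-in mirroring the fields above
        def __init__(self, **kwargs):
            self.name = kwargs.get('name')
            self.size = kwargs.get('size')
            self.etag = kwargs.get('etag')

    props = FileProps(name='folder/report.csv', size=1024)
    print(props.size, props['size'], props.get('etag'))   # 1024 1024 None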
- Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified. - :ivar bool is_directory: is the path a directory or not. - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar content_length: the size of file if the path is a file. - """ - def __init__(self, **kwargs): - super(PathProperties, self).__init__( - **kwargs - ) - self.name = kwargs.pop('name', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - self.last_modified = kwargs.get('last_modified', None) - self.is_directory = kwargs.get('is_directory', False) - self.etag = kwargs.get('etag', None) - self.content_length = kwargs.get('content_length', None) - - @classmethod - def _from_generated(cls, generated): - path_prop = PathProperties() - path_prop.name = generated.name - path_prop.owner = generated.owner - path_prop.group = generated.group - path_prop.permissions = generated.permissions - path_prop.last_modified = generated.last_modified - path_prop.is_directory = bool(generated.is_directory) - path_prop.etag = generated.additional_properties.get('etag') - path_prop.content_length = generated.content_length - return path_prop - - -class PathPropertiesPaged(PageIterator): - """An Iterable of Path properties. - - :ivar str path: Filters the results to return only paths under the specified path. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results. - - :param callable command: Function to retrieve the next page of items. - :param str path: Filters the results to return only paths under the specified path. - :param int max_results: The maximum number of paths to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__( - self, command, - recursive, - path=None, - max_results=None, - continuation_token=None, - upn=None): - super(PathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.recursive = recursive - self.results_per_page = max_results - self.path = path - self.upn = upn - self.current_page = None - self.path_list = None - - def _get_next_cb(self, continuation_token): - try: - return self._command( - self.recursive, - continuation=continuation_token or None, - path=self.path, - max_results=self.results_per_page, - upn=self.upn, - cls=return_headers_and_deserialized_path_list) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.path_list, self._response = get_next_return - self.current_page = [self._build_item(item) for item in self.path_list] - - return self._response['continuation'] or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, PathProperties): - return item - if isinstance(item, Path): - path = PathProperties._from_generated(item) # pylint: disable=protected-access - return path - return item - - -class LeaseProperties(BlobLeaseProperties): - """DataLake Lease Properties. - - :ivar str status: - The lease status of the file.
Possible values: locked|unlocked - :ivar str state: - Lease state of the file. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file is leased, specifies whether the lease is of infinite or fixed duration. - """ - def __init__(self): - self.status = None - self.state = None - self.duration = None - - -class ContentSettings(BlobContentSettings): - """The content settings of a file or directory. - - :ivar str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :ivar str content_language: - If the content_language has previously been set - for the file, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :ivar str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :ivar str content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - :keyword str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :keyword str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :keyword str content_language: - If the content_language has previously been set - for the file, that value is stored. - :keyword str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :keyword str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :keyword str content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - """ - def __init__( - self, **kwargs): - super(ContentSettings, self).__init__( - **kwargs - ) - - -class AccountSasPermissions(BlobAccountSasPermissions): - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - create=False): - super(AccountSasPermissions, self).__init__( - read=read, create=create, write=write, list=list, - delete=delete - ) - - -class FileSystemSasPermissions(ContainerSasPermissions): - """FileSystemSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_system_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool write: - Create or write content, properties, metadata. Lease the file system. - :param bool delete: - Delete the file system. - :param bool list: - List paths in the file system. 
- """ - def __init__(self, read=False, write=False, delete=False, list=False # pylint: disable=redefined-builtin - ): - super(FileSystemSasPermissions, self).__init__( - read=read, write=write, delete=delete, list=list - ) - - -class DirectorySasPermissions(BlobSasPermissions): - """DirectorySasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_directory_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool create: - Create a new directory - :param bool write: - Create or write content, properties, metadata. Lease the directory. - :param bool delete: - Delete the directory. - """ - def __init__(self, read=False, create=False, write=False, - delete=False): - super(DirectorySasPermissions, self).__init__( - read=read, create=create, write=write, - delete=delete - ) - - -class FileSasPermissions(BlobSasPermissions): - """FileSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_sas` function. - - :param bool read: - Read the content, properties, metadata etc. Use the file as - the source of a read operation. - :param bool create: - Write a new file - :param bool write: - Create or write content, properties, metadata. Lease the file. - :param bool delete: - Delete the file. - """ - def __init__(self, read=False, create=False, write=False, - delete=False): - super(FileSasPermissions, self).__init__( - read=read, create=create, write=write, - delete=delete - ) - - -class AccessPolicy(BlobAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.datalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. 
- :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, **kwargs): - super(AccessPolicy, self).__init__( - permission=permission, expiry=expiry, start=kwargs.pop('start', None) - ) - - -class ResourceTypes(BlobResourceTypes): - """ - Specifies the resource types that are accessible with the account SAS. - - :param bool service: - Access to service-level APIs (e.g. List File Systems) - :param bool file_system: - Access to file_system-level APIs (e.g., Create/Delete file system, - List Directories/Files) - :param bool object: - Access to object-level APIs for - files (e.g. Create File, etc.) - """ - def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin - ): - super(ResourceTypes, self).__init__(service=service, container=file_system, object=object) - - -class UserDelegationKey(BlobUserDelegationKey): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identity SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - @classmethod - def _from_generated(cls, generated): - delegation_key = cls() - delegation_key.signed_oid = generated.signed_oid - delegation_key.signed_tid = generated.signed_tid - delegation_key.signed_start = generated.signed_start - delegation_key.signed_expiry = generated.signed_expiry - delegation_key.signed_service = generated.signed_service - delegation_key.signed_version = generated.signed_version - delegation_key.value = generated.value - return delegation_key - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the file system may be accessed publicly and the level of access. - """ - - File = 'blob' - """ - Specifies public read access for files. File data within this file system can be read - via anonymous request, but file system data is not available. Clients cannot enumerate - files within the container via anonymous request. - """ - - FileSystem = 'container' - """ - Specifies full public read access for file system and file data. Clients can enumerate - files within the file system via anonymous request, but cannot enumerate file systems - within the storage account. - """ - - @classmethod - def _from_generated(cls, public_access): - if public_access == "blob": # pylint:disable=no-else-return - return cls.File - elif public_access == "container": - return cls.FileSystem - - return None - - -class LocationMode(object): - """ - Specifies the location the request should be sent to.
This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class DelimitedJsonDialect(BlobDelimitedJSON): - """Defines the input or output JSON serialization for a datalake query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - -class DelimitedTextDialect(BlobDelimitedTextDialect): - """Defines the input or output delimited (CSV) serialization for a datalake query request. - - :keyword str delimiter: - Column separator, defaults to ','. - :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - - -class DataLakeFileQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_path_client.py deleted file mode 100644 index d57c903..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_path_client.py +++ /dev/null @@ -1,649 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.multiapi.storagev2.blob.v2019_12_12 import BlobClient -from ._shared.base_client import StorageAccountHostsMixin, parse_query -from ._shared.response_handlers import return_response_headers -from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \ - get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions -from ._models import LocationMode, DirectoryProperties -from ._generated import DataLakeStorageClient -from ._data_lake_lease import DataLakeLeaseClient -from ._generated.models import StorageErrorException -from ._deserialize import process_storage_error - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(StorageAccountHostsMixin): - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - # remove the preceding/trailing delimiter from the path components - file_system_name = file_system_name.strip('/') - - # the name of root directory is / - if path_name != '/': - path_name = path_name.strip('/') - - if not (file_system_name and path_name): - raise ValueError("Please specify a file system name and file path.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._blob_client = BlobClient(blob_account_url, file_system_name, path_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self.path_name = path_name - - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - self._client = DataLakeStorageClient(self.url, file_system_name, path_name, pipeline=self._pipeline) - - def __exit__(self, *args): - self._blob_client.close() - super(PathClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. 
- """ - self._blob_client.close() - self.__exit__() - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - quote(self.path_name, safe='~'), - self._query_str) - - def _create_path_options(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'resource': resource_type, - 'properties': add_metadata_headers(metadata), - 'permissions': kwargs.pop('permissions', None), - 'umask': kwargs.pop('umask', None), - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.path.create(**options) - except StorageErrorException as error: - process_storage_error(error) - - @staticmethod - def _delete_path_options(**kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'recursive': True, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - def _delete(self, **kwargs): - # type: (bool, **Any) -> None - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :param ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return self._client.path.delete(**options) - except StorageErrorException as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'owner': owner, - 'group': group, - 'permissions': permissions, - 'acl': acl, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). 
- """ - if not any([owner, group, permissions, acl]): - raise ValueError("At least one parameter should be set for set_access_control API") - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return self._client.path.set_access_control(**options) - except StorageErrorException as error: - process_storage_error(error) - - @staticmethod - def _get_access_control_options(upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'action': 'getAccessControl', - 'upn': upn if upn else False, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - :param upn: Optional. - Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. 
- """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return self._client.path.get_properties(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _rename_path_options(self, rename_source, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None): - raise ValueError("metadata, permissions, umask is not supported for this operation") - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - source_lease_id = get_lease_id(kwargs.pop('source_lease', None)) - mod_conditions = get_mod_conditions(kwargs) - source_mod_conditions = get_source_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'rename_source': quote(unquote(rename_source)), - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'source_lease_id': source_lease_id, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions':source_mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'mode': 'legacy', - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _rename_path(self, rename_source, - **kwargs): - # type: (**Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: - The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return self._client.path.create(**options) - except StorageErrorException as error: - process_storage_error(error) - - def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../tests/test_blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a file/directory. - """ - path_properties = self._blob_client.get_blob_properties(**kwargs) - path_properties.__class__ = DirectoryProperties - return path_properties - - def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) 
-> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - """ - return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_quick_query_helper.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_quick_query_helper.py deleted file mode 100644 index ff67d27..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_quick_query_helper.py +++ /dev/null @@ -1,71 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
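The ``PathClient`` removed above backs the public ``DataLakeDirectoryClient`` and ``DataLakeFileClient`` types: most property and metadata calls are delegated to an internal ``BlobClient``, while ACL, create/rename, and lease operations go to the dfs endpoint. A hypothetical sketch of the access-control surface it provides (endpoint, file system, path, and credential are placeholders)::

    from azure.storage.filedatalake import DataLakeDirectoryClient

    directory = DataLakeDirectoryClient(
        account_url="https://myaccount.dfs.core.windows.net",  # placeholder
        file_system_name="myfilesystem",
        directory_name="raw/2020",
        credential="<account-key>",
    )

    # permissions and acl are mutually exclusive; symbolic or 4-digit octal
    # notation is accepted when a hierarchical namespace is enabled.
    directory.set_access_control(permissions="rwxr-x---")

    # upn=True translates user object IDs to User Principal Names in the
    # returned owner/group/acl values (group and application IDs stay as IDs).
    access = directory.get_access_control(upn=True)
    print(access["owner"], access["acl"])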
-# -------------------------------------------------------------------------- - -from typing import Union, Iterable, IO # pylint: disable=unused-import - - -class DataLakeFileQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. - """ - - def __init__( - self, - blob_query_reader - ): - self.name = blob_query_reader.name - self.file_system = blob_query_reader.container - self.response_headers = blob_query_reader.response_headers - self.record_delimiter = blob_query_reader.record_delimiter - self._bytes_processed = 0 - self._blob_query_reader = blob_query_reader - - def __len__(self): - return len(self._blob_query_reader) - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - return self._blob_query_reader.readall() - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - self._blob_query_reader(stream) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - return self._blob_query_reader.records() diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
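``DataLakeFileQueryReader`` above is a thin wrapper over the blob quick-query reader. A hedged sketch of how such a reader is typically obtained through the public API (file path, credential, and the SQL expression are illustrative; ``query_file`` requires service version 2019-12-12 or later)::

    from azure.storage.filedatalake import DataLakeFileClient, DelimitedTextDialect

    file_client = DataLakeFileClient(
        account_url="https://myaccount.dfs.core.windows.net",  # placeholder
        file_system_name="myfilesystem",
        file_path="data/records.csv",
        credential="<account-key>",
    )

    errors = []
    reader = file_client.query_file(
        "SELECT * from DataLakeStorage",                          # illustrative query
        file_format=DelimitedTextDialect(delimiter=",", has_header=True),
        on_error=errors.append,  # receives DataLakeFileQueryError objects
    )

    # records() streams results line by line; readall() would buffer everything.
    for record in reader.records():
        print(record)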
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/authentication.py deleted file mode 100644 index b11dc57..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/authentication.py +++ /dev/null @@ -1,140 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client.py deleted file mode 100644 index 14deea6..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client.py +++ /dev/null @@ -1,437 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, - Optional, - Any, - Iterable, - Dict, - List, - Type, - Tuple, - TYPE_CHECKING, -) -import logging - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except StorageErrorException as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} - except KeyError: - credential = conn_settings.get("SharedAccessSignature") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DefaultEndpointsProtocol"], - conn_settings["AccountName"], - service, - conn_settings["EndpointSuffix"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["AccountName"], service, conn_settings["EndpointSuffix"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - 
conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client_async.py deleted file mode 100644 index d252ad0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/base_client_async.py +++ /dev/null @@ -1,179 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except StorageErrorException as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. 
- ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. 
- :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. 
- :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. 
- :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - 
Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. 
- if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/models.py deleted file mode 100644 index 6919763..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/models.py +++ /dev/null @@ -1,468 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" 
- blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = 
"InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in 
self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. - - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. 
- :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. - """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.filedatalake.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - parsed._str = permission # pylint: disable = protected-access - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. 
- :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies.py deleted file mode 100644 index c9bc798..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/request_handlers.py deleted file mode 100644 index 4f15b65..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/request_handlers.py +++ /dev/null @@ -1,147 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/response_handlers.py deleted file mode 100644 index ac526e5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/response_handlers.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.location_mode, deserialized - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message 
+= "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/shared_access_signature.py deleted file mode 100644 index 367c655..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/shared_access_signature.py +++ /dev/null @@ -1,209 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. 
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - 
get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads.py deleted file mode 100644 index 29949d5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads.py +++ /dev/null @@ -1,568 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
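# NOTE (editor's illustrative sketch, not part of the deleted uploads.py): the
# chunk-upload helpers below keep at most `max_concurrency` uploads in flight by
# seeding a ThreadPoolExecutor and then replacing each completed future with the
# next pending chunk via futures.wait(FIRST_COMPLETED). The names
# `_example_bounded_parallel_uploads` and the toy uploader are assumptions for
# this example only; the real module routes work through uploader.process_chunk
# wrapped in with_current_context.

from concurrent import futures as _futures_example
from itertools import islice as _islice_example


def _example_bounded_parallel_uploads(uploader, chunks, max_concurrency=4):
    # Seed the pool with the first `max_concurrency` chunks, then top it up
    # one-for-one as uploads finish, mirroring _parallel_uploads below.
    pending = iter(chunks)
    results = []
    with _futures_example.ThreadPoolExecutor(max_concurrency) as executor:
        running = {executor.submit(uploader, chunk)
                   for chunk in _islice_example(pending, max_concurrency)}
        while running:
            done, running = _futures_example.wait(
                running, return_when=_futures_example.FIRST_COMPLETED)
            results.extend(task.result() for task in done)
            for _ in done:
                try:
                    running.add(executor.submit(uploader, next(pending)))
                except StopIteration:
                    break
    return results


# Example usage with a toy uploader that simply reports chunk sizes:
#     _example_bounded_parallel_uploads(len, [b"a" * 4096, b"b" * 4096], 2)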
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - 
self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, block_id, block_stream): - try: - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, 
- length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. 
- if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads_async.py deleted file mode 100644 index 29c0ee4..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/uploads_async.py +++ /dev/null @@ -1,367 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, block_id, block_stream): - range_id = await self._upload_substream_block(block_id, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, block_id, block_stream): - try: - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, 
chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - chunk_end, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared_access_signature.py deleted file mode 100644 index 1186afa..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared_access_signature.py +++ /dev/null @@ -1,349 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING - -from azure.multiapi.storagev2.blob.v2019_12_12 import generate_account_sas as generate_blob_account_sas -from azure.multiapi.storagev2.blob.v2019_12_12 import generate_container_sas, generate_blob_sas -if TYPE_CHECKING: - import datetime - from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \ - UserDelegationKey - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the DataLake service. - - Use the returned signature as the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The access key to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - return generate_blob_account_sas( - account_name=account_name, - account_key=account_key, - resource_types=resource_types, - permission=permission, - expiry=expiry, - **kwargs - ) - - -def generate_file_system_sas( - account_name, # type: str - file_system_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSystemSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file system. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. 
If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - return generate_container_sas( - account_name=account_name, - container_name=file_system_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) - - -def generate_directory_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a directory. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. 
The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=directory_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) - - -def generate_file_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - file_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any BDataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. 
- :param str directory_name: - The name of the directory. - :param str file_name: - The name of the file. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - """ - if directory_name: - path = directory_name.rstrip('/') + "/" + file_name - else: - path = file_name - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=path, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_upload_helper.py deleted file mode 100644 index d1a98dd..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_upload_helper.py +++ /dev/null @@ -1,87 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from ._deserialize import ( - process_storage_error) -from ._generated.models import ( - StorageErrorException, -) -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import ( - upload_data_chunks, - DataLakeFileChunkUploader) - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - - return client.flush_data(position=length, - path_http_headers=path_http_headers, - 
modified_access_conditions=modified_access_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_version.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/_version.py deleted file mode 100644 index 8a3a444..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.1.1" diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/__init__.py deleted file mode 100644 index c24dde8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._download_async import StorageStreamDownloader -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._file_system_client_async import FileSystemClient -from ._data_lake_service_client_async import DataLakeServiceClient -from ._data_lake_lease_async import DataLakeLeaseClient - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeDirectoryClient', - 'DataLakeFileClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_file_client_async.py deleted file mode 100644 index e74cc13..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_file_client_async.py +++ /dev/null @@ -1,513 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from ._download_async import StorageStreamDownloader -from ._path_client_async import PathClient -from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase -from .._deserialize import process_storage_error -from .._generated.models import StorageErrorException -from .._models import FileProperties -from ..aio._upload_helper import upload_datalake_file - - -class DataLakeFileClient(PathClient, DataLakeFileClientBase): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. 
- :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - async def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - async def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return await self._delete(**kwargs) - - async def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - blob_properties = await self._get_path_properties(**kwargs) - return FileProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access - - async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return await upload_datalake_file(**options) - - async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data, - offset, - length=length, - **kwargs) - try: - return await self._client.path.append_data(**options) - except StorageErrorException as error: - process_storage_error(error) - - async def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. - - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. 
- :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 12 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return await self._client.path.flush_data(**options) - except StorageErrorException as error: - process_storage_error(error) - - async def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - async def rename_file(self, new_name, # type: str - **kwargs): - # type: (**Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - path = new_name[len(new_file_system):] - - new_directory_client = DataLakeFileClient( - self.url, new_file_system, file_path=path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_directory_client._rename_path('/' + self.file_system_name + '/' + self.path_name, # pylint: disable=protected-access - **kwargs) - return new_directory_client diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_lease_async.py deleted file mode 100644 index 9858f92..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_lease_async.py +++ /dev/null @@ -1,243 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobLeaseClient -from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase - - -if TYPE_CHECKING: - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(DataLakeLeaseClientBase): - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.aio.FileSystemClient or - ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - super(DataLakeLeaseClient, self).__init__(client, lease_id) - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. 
If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_service_client_async.py deleted file mode 100644 index a004499..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_data_lake_service_client_async.py +++ /dev/null @@ -1,372 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from azure.core.paging import ItemPaged - -from azure.multiapi.storagev2.blob.v2019_12_12.aio import BlobServiceClient -from .._generated.aio import DataLakeStorageClient -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from ._file_system_client_async import FileSystemClient -from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase -from .._shared.policies_async import ExponentialRetry -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_file_client_async import DataLakeFileClient -from ._models import FileSystemPropertiesPaged -from .._models import UserDelegationKey, LocationMode - - -class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. 
- :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(DataLakeServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs - ) - self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - self._client = DataLakeStorageClient(self.url, None, None, pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._blob_service_client.close() - await super(DataLakeServiceClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_service_client.close() - await self.__aexit__() - - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. - """ - delegation_key = await self._blob_service_client.get_user_delegation_key( - key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) 
-> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. - """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - async def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - async def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. 
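# --- Hedged usage sketch (illustrative, not part of the vendored sources) ---
# Account-level round trip with the async DataLakeServiceClient defined above:
# create a file system, enumerate file systems through the async pager from
# list_file_systems, then delete it. Account URL and credential are
# illustrative assumptions.
from azure.multiapi.storagev2.filedatalake.v2019_12_12.aio import DataLakeServiceClient


async def service_example():
    async with DataLakeServiceClient(
            "https://myaccount.dfs.core.windows.net",
            credential="<account-key-or-sas-token>") as service:
        await service.create_file_system("my-fs")
        async for file_system in service.list_file_systems(name_starts_with="my-"):
            print(file_system.name)
        await service.delete_file_system("my-fs")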
- :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - _configuration=self._config, - _pipeline=self._pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. 
- - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. 
- """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_download_async.py deleted file mode 100644 index 2fda96f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_download_async.py +++ /dev/null @@ -1,53 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from .._models import FileProperties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = FileProperties._from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - return self._downloader.chunks() - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return await self._downloader.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_file_system_client_async.py deleted file mode 100644 index 5cb930d..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_file_system_client_async.py +++ /dev/null @@ -1,745 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace - -from azure.core.async_paging import AsyncItemPaged - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.multiapi.storagev2.blob.v2019_12_12.aio import ContainerClient - -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._models import PathPropertiesPaged -from ._data_lake_lease_async import DataLakeLeaseClient -from .._file_system_client import FileSystemClient as FileSystemClientBase -from .._generated.aio import DataLakeStorageClient -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._models import FileSystemProperties, PublicAccess - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings) - - -class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase): - """A client to interact with a specific file system, even if that file system - may not yet exist. - - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the URL already has a SAS token, specifying an explicit credential will take priority. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(FileSystemClient, self).__init__( - account_url, - file_system_name=file_system_name, - credential=credential, - **kwargs) - # to override the class field _container_client sync version - kwargs.pop('_hosts', None) - self._container_client = ContainerClient(self._blob_account_url, file_system_name, - credential=credential, - _hosts=self._container_client._hosts,# pylint: disable=protected-access - **kwargs) # type: ignore # pylint: disable=protected-access - self._client = DataLakeStorageClient(self.url, file_system_name, None, pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._container_client.close() - await super(FileSystemClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._container_client.close() - await self.__aexit__() - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the file_system. 
- """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 16 - :caption: Creating a file system in the datalake service. - """ - return await self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - @distributed_trace_async - async def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 16 - :caption: Deleting a file system in the datalake service. 
- """ - await self._container_client.delete_container(**kwargs) - - @distributed_trace_async - async def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the file system. - """ - container_properties = await self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - @distributed_trace_async - async def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - return await self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - @distributed_trace_async - async def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return await self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - @distributed_trace_async - async def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, get_file_system_access_policy only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. 
- :rtype: dict[str, Any] - """ - access_policy = await self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - @distributed_trace - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> ItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: - An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 12 - :caption: List the blobs in the file system. - """ - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.file_system.list_paths, - path=path, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, recursive, path=path, max_results=max_results, - page_iterator_class=PathPropertiesPaged, **kwargs) - - @distributed_trace_async - async def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. 
- The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - @distributed_trace_async - async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.delete_directory(**kwargs) - return directory_client - - @distributed_trace_async - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 12 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - @distributed_trace_async - async def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 12 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.delete_file(**kwargs) - return file_client - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) 
-> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, - loop=self._loop - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file_path.name - except AttributeError: - pass - - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_models.py deleted file mode 100644 index 9702ca6..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_models.py +++ /dev/null @@ -1,110 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# --------------------------------------------------------------------------
-# pylint: disable=too-few-public-methods, too-many-instance-attributes
-# pylint: disable=super-init-not-called, too-many-lines
-from azure.core.async_paging import AsyncPageIterator
-from azure.multiapi.storagev2.blob.v2019_12_12.aio._models import ContainerPropertiesPaged
-
-from .._deserialize import return_headers_and_deserialized_path_list, process_storage_error
-from .._generated.models import StorageErrorException, Path
-from .._models import PathProperties
-
-from .._models import FileSystemProperties
-
-
-class FileSystemPropertiesPaged(ContainerPropertiesPaged):
-    """An Iterable of File System properties.
-
-    :ivar str service_endpoint: The service URL.
-    :ivar str prefix: A file system name prefix being used to filter the list.
-    :ivar str marker: The continuation token of the current page of results.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar str location_mode: The location mode being used to list results. The available
-        options include "primary" and "secondary".
-    :ivar current_page: The current page of listed results.
-    :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str prefix: Filters the results to return only file systems whose names
-        begin with the specified prefix.
-    :param int results_per_page: The maximum number of file system names to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
-    """
-
-    def __init__(self, *args, **kwargs):
-        super(FileSystemPropertiesPaged, self).__init__(
-            *args,
-            **kwargs
-        )
-
-    @staticmethod
-    def _build_item(item):
-        return FileSystemProperties._from_generated(item)  # pylint: disable=protected-access
-
-
-class PathPropertiesPaged(AsyncPageIterator):
-    """An Iterable of Path properties.
-
-    :ivar str path: Filters the results to return only paths under the specified path.
-    :ivar int results_per_page: The maximum number of results retrieved per API call.
-    :ivar str continuation_token: The continuation token to retrieve the next page of results.
-    :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results.
-
-    :param callable command: Function to retrieve the next page of items.
-    :param str path: Filters the results to return only paths under the specified path.
-    :param int max_results: The maximum number of paths to retrieve per
-        call.
-    :param str continuation_token: An opaque continuation token.
- """ - - def __init__( - self, command, - recursive, - path=None, - max_results=None, - continuation_token=None, - upn=None): - super(PathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.recursive = recursive - self.results_per_page = max_results - self.path = path - self.upn = upn - self.current_page = None - self.path_list = None - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - self.recursive, - continuation=continuation_token or None, - path=self.path, - max_results=self.results_per_page, - upn=self.upn, - cls=return_headers_and_deserialized_path_list) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.path_list, self._response = get_next_return - self.current_page = [self._build_item(item) for item in self.path_list] - - return self._response['continuation'] or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, PathProperties): - return item - if isinstance(item, Path): - path = PathProperties._from_generated(item) # pylint: disable=protected-access - return path - return item diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_upload_helper.py deleted file mode 100644 index 93da7bf..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/aio/_upload_helper.py +++ /dev/null @@ -1,87 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from .._deserialize import ( - process_storage_error) -from .._generated.models import ( - StorageErrorException, -) -from .._shared.response_handlers import return_response_headers -from .._shared.uploads_async import ( - upload_data_chunks, - DataLakeFileChunkUploader) - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -async def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = await client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - await upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - - return await client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/__init__.py deleted file mode 100644 index 02a7559..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/__init__.py +++ /dev/null @@ -1,91 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from ._download import StorageStreamDownloader -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._file_system_client import FileSystemClient -from ._data_lake_service_client import DataLakeServiceClient -from ._data_lake_lease import DataLakeLeaseClient -from ._models import ( - LocationMode, - ResourceTypes, - FileSystemProperties, - FileSystemPropertiesPaged, - DirectoryProperties, - FileProperties, - PathProperties, - LeaseProperties, - ContentSettings, - AccountSasPermissions, - FileSystemSasPermissions, - DirectorySasPermissions, - FileSasPermissions, - UserDelegationKey, - PublicAccess, - AccessPolicy, - DelimitedTextDialect, - DelimitedJsonDialect, - ArrowDialect, - ArrowType, - DataLakeFileQueryError, - AccessControlChangeResult, - AccessControlChangeCounters, - AccessControlChangeFailure, - AccessControlChanges, -) - -from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \ - generate_file_sas - -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import StorageErrorCode -from ._version import VERSION - -__version__ = VERSION - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeFileClient', - 'DataLakeDirectoryClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'PublicAccess', - 'AccessPolicy', - 'ResourceTypes', - 'StorageErrorCode', - 'UserDelegationKey', - 'FileSystemProperties', - 'FileSystemPropertiesPaged', - 'DirectoryProperties', - 'FileProperties', - 'PathProperties', - 'LeaseProperties', - 'ContentSettings', - 'AccessControlChangeResult', - 'AccessControlChangeCounters', - 'AccessControlChangeFailure', - 'AccessControlChanges', - 'AccountSasPermissions', - 'FileSystemSasPermissions', - 'DirectorySasPermissions', - 'FileSasPermissions', - 'generate_account_sas', - 'generate_file_system_sas', - 'generate_directory_sas', - 'generate_file_sas', - 'VERSION', - 'StorageStreamDownloader', - 'DelimitedTextDialect', - 'DelimitedJsonDialect', - 'DataLakeFileQueryError', - 'ArrowDialect', - 'ArrowType', - 'DataLakeFileQueryError' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py deleted file mode 100644 index c42391e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py +++ /dev/null @@ -1,563 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Any - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore -from azure.core.pipeline import Pipeline -from ._deserialize import deserialize_dir_properties -from ._shared.base_client import TransportWrapper, parse_connection_str -from ._data_lake_file_client import DataLakeFileClient -from ._models import DirectoryProperties, FileProperties -from ._path_client import PathClient - - -class DataLakeDirectoryClient(PathClient): - """A client to interact with the DataLake directory, even if the directory may not yet exist. 
-
-    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
-    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
-
-    :ivar str url:
-        The full endpoint URL to the file system, including SAS token if used.
-    :ivar str primary_endpoint:
-        The full primary endpoint URL.
-    :ivar str primary_hostname:
-        The hostname of the primary endpoint.
-    :param str account_url:
-        The URI to the storage account.
-    :param file_system_name:
-        The file system for the directory or files.
-    :type file_system_name: str
-    :param directory_name:
-        The whole path of the directory. e.g. {directory under file system}/{directory to interact with}
-    :type directory_name: str
-    :param credential:
-        The credentials with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string,
-        an instance of an AzureSasCredential from azure.core.credentials, an account
-        shared access key, or an instance of a TokenCredentials class from azure.identity.
-        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
-        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
-
-    .. admonition:: Example:
-
-        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
-            :start-after: [START instantiate_directory_client_from_conn_str]
-            :end-before: [END instantiate_directory_client_from_conn_str]
-            :language: python
-            :dedent: 4
-            :caption: Creating the DataLakeServiceClient from connection string.
-    """
-    def __init__(
-        self, account_url,  # type: str
-        file_system_name,  # type: str
-        directory_name,  # type: str
-        credential=None,  # type: Optional[Any]
-        **kwargs  # type: Any
-    ):
-        # type: (...) -> None
-        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name,
-                                                      credential=credential, **kwargs)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            file_system_name,  # type: str
-            directory_name,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-    ):  # type: (...) -> DataLakeDirectoryClient
-        """
-        Create DataLakeDirectoryClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param file_system_name:
-            The name of the file system to interact with.
-        :type file_system_name: str
-        :param directory_name:
-            The name of the directory to interact with. The directory is under the file system.
-        :type directory_name: str
-        :param credential:
-            The credentials with which to authenticate. This is optional if the
-            account URL already has a SAS token, or the connection string already has shared
-            access key values. The value can be a SAS token string,
-            an instance of an AzureSasCredential from azure.core.credentials, an account shared access
-            key, or an instance of a TokenCredentials class from azure.identity.
-            Credentials provided here will take precedence over those in the connection string.
-        :return: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-        """
-        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
-        return cls(
-            account_url, file_system_name=file_system_name, directory_name=directory_name,
-            credential=credential, **kwargs)
-
-    def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
-                         **kwargs):
-        # type: (...)
-> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return self._create('directory', metadata=metadata, **kwargs) - - def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: None
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/datalake_samples_directory.py
-                :start-after: [START delete_directory]
-                :end-before: [END delete_directory]
-                :language: python
-                :dedent: 4
-                :caption: Delete directory.
-        """
-        return self._delete(recursive=True, **kwargs)
-
-    def get_directory_properties(self, **kwargs):
-        # type: (**Any) -> DirectoryProperties
-        """Returns all user-defined metadata, standard HTTP properties, and
-        system properties for the directory. It does not return the content of the directory.
-
-        :keyword lease:
-            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: DirectoryProperties
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/datalake_samples_directory.py
-                :start-after: [START get_directory_properties]
-                :end-before: [END get_directory_properties]
-                :language: python
-                :dedent: 4
-                :caption: Getting the properties for a file/directory.
-        """
-        return self._get_path_properties(cls=deserialize_dir_properties, **kwargs)  # pylint: disable=protected-access
-
-    def exists(self, **kwargs):
-        # type: (**Any) -> bool
-        """
-        Returns True if a directory exists and returns False otherwise.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: boolean
-        """
-        return self._exists(**kwargs)
-
-    def rename_directory(self, new_name, **kwargs):
-        # type: (str, **Any) -> DataLakeDirectoryClient
-        """
-        Rename the source directory.
-
-        :param str new_name:
-            the new directory name the user wants to rename to.
-            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
-        :keyword source_lease:
-            A lease ID for the source path. If specified,
-            the source path must have an active lease and the lease ID must
-            match.
-        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword lease:
-            Required if the file/directory has an active lease. Value can be a LeaseClient object
-            or the lease ID as a string.
-        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
-        :keyword ~datetime.datetime if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str etag:
-            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions match_condition:
-            The match condition to use upon the etag.
-        :keyword ~datetime.datetime source_if_modified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only
-            if the resource has been modified since the specified time.
-        :keyword ~datetime.datetime source_if_unmodified_since:
-            A DateTime value. Azure expects the date value passed in to be UTC.
-            If timezone is included, any non-UTC datetimes will be converted to UTC.
-            If a date is passed in without timezone info, it is assumed to be UTC.
-            Specify this header to perform the operation only if
-            the resource has not been modified since the specified date/time.
-        :keyword str source_etag:
-            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
-            and act according to the condition specified by the `match_condition` parameter.
-        :keyword ~azure.core.MatchConditions source_match_condition:
-            The source match condition to use upon the etag.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :return: DataLakeDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/datalake_samples_directory.py
-                :start-after: [START rename_directory]
-                :end-before: [END rename_directory]
-                :language: python
-                :dedent: 4
-                :caption: Rename the source directory.
- """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = DataLakeDirectoryClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, - credential=self._raw_credential or new_dir_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - new_directory_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_directory_client - - def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.delete_directory(**kwargs) - return subdir - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. 
- :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. 
-        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
-        """
-        try:
-            file_path = file.get('name')
-        except AttributeError:
-            file_path = self.path_name + '/' + str(file)
-
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
-        )
-        return DataLakeFileClient(
-            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
-
-    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
-                                 ):
-        # type: (...) -> DataLakeDirectoryClient
-        """Get a client to interact with the specified subdirectory of the current directory.
-
-        The subdirectory need not already exist.
-
-        :param sub_directory:
-            The directory with which to interact. This can either be the name of the directory,
-            or an instance of DirectoryProperties.
-        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
-        :returns: A DataLakeDirectoryClient.
-        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
-        """
-        try:
-            subdir_path = sub_directory.get('name')
-        except AttributeError:
-            subdir_path = self.path_name + '/' + str(sub_directory)
-
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
-        )
-        return DataLakeDirectoryClient(
-            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
-            _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline,
-            require_encryption=self.require_encryption,
-            key_encryption_key=self.key_encryption_key,
-            key_resolver_function=self.key_resolver_function)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py
deleted file mode 100644
index e15842d..0000000
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py
+++ /dev/null
@@ -1,777 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# -------------------------------------------------------------------------- -from io import BytesIO -from typing import Any - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core.exceptions import HttpResponseError -from ._quick_query_helper import DataLakeFileQueryReader -from ._shared.base_client import parse_connection_str -from ._shared.request_handlers import get_length, read_length -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import IterStreamer -from ._upload_helper import upload_datalake_file -from ._download import StorageStreamDownloader -from ._path_client import PathClient -from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \ - convert_datetime_to_rfc1123 -from ._deserialize import process_storage_error, deserialize_file_properties -from ._models import FileProperties, DataLakeFileQueryError - - -class DataLakeFileClient(PathClient): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> DataLakeFileClient - """ - Create DataLakeFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param directory_name: The name of directory to interact with. The directory is under file system. - :type directory_name: str - :param file_name: The name of file to interact with. 
The file is under directory. - :type file_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeFileClient - :rtype ~azure.storage.filedatalake.DataLakeFileClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, file_path=file_path, - credential=credential, **kwargs) - - def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return self._delete(**kwargs) - - def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access - - def set_file_expiry(self, expiry_options, # type: str - expires_on=None, # type: Optional[Union[datetime, int]] - **kwargs): - # type: (str, Optional[Union[datetime, int]], **Any) -> None - """Sets the time a file will expire and be deleted. - - :param str expiry_options: - Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' - :param datetime or int expires_on: - The time to set the file to expiry. - When expiry_options is RelativeTo*, expires_on should be an int in milliseconds. - If the type of expires_on is datetime, it should be in UTC time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - expires_on = convert_datetime_to_rfc1123(expires_on) - except AttributeError: - expires_on = str(expires_on) - self._datalake_client_for_blob_operation.path \ - .set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access - - def _upload_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - - kwargs['properties'] = add_metadata_headers(metadata) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_mod_conditions(kwargs) - - if content_settings: - kwargs['path_http_headers'] = get_path_http_headers(content_settings) - - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['validate_content'] = validate_content - kwargs['max_concurrency'] = max_concurrency - kwargs['client'] = self._client.path - kwargs['file_settings'] = self._config - - return kwargs - - def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. 
- :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). 
- """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return upload_datalake_file(**options) - - @staticmethod - def _append_data_options(data, offset, length=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'body': data, - 'position': offset, - 'content_length': length, - 'lease_access_conditions': access_conditions, - 'validate_content': kwargs.pop('validate_content', False), - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data, - offset, - length=length, - **kwargs) - try: - return self._client.path.append_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'position': offset, - 'content_length': 0, - 'path_http_headers': path_http_headers, - 'retain_uncommitted_data': retain_uncommitted_data, - 'close': kwargs.pop('close', False), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. 
- - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 8 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return self._client.path.flush_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. 
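The append/flush pair above is the lower-level alternative to ``upload_data``: each ``append_data`` call stages bytes at an explicit offset, and nothing becomes readable until ``flush_data`` commits up to the final offset. A hedged sketch of the pattern, reusing the hypothetical ``file_client`` from the upload sketch::

    first = b"first chunk,"
    second = b"second chunk"

    # Stage two writes at explicit offsets.
    file_client.append_data(first, offset=0, length=len(first))
    file_client.append_data(second, offset=len(first), length=len(second))

    # offset here is the total length of the file after the commit.
    file_client.flush_data(offset=len(first) + len(second))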
Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._exists(**kwargs) - - def rename_file(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. 
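The downloader described above supports three read paths; a short sketch, again using the hypothetical client from the upload example (the local file name is a placeholder)::

    # 1) Read everything into memory.
    content = file_client.download_file().readall()

    # 2) Stream into any writable object.
    with open("data_copy.csv", "wb") as handle:
        file_client.download_file().readinto(handle)

    # 3) Iterate over the content chunk by chunk.
    for chunk in file_client.download_file().chunks():
        print(len(chunk))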
- :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. 
- """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_file_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_file_sas = self._query_str.strip('?') - - new_file_client = DataLakeFileClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, - credential=self._raw_credential or new_file_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - new_file_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_file_client - - def query_file(self, query_expression, **kwargs): - # type: (str, **Any) -> DataLakeFileQueryReader - """ - Enables users to select/project on datalake file data by providing simple query expressions. - This operations returns a DataLakeFileQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - eg. Select * from DataLakeStorage - :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword file_format: - Optional. Defines the serialization of the data currently stored in the file. The default is to - treat the file data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. - :paramtype file_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the file. By providing an output format, the file data will be reformatted - according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. - :paramtype output_format: - ~azure.storage.filedatalake.DelimitedTextDialect, ~azure.storage.filedatalake.DelimitedJsonDialect - or list[~azure.storage.filedatalake.ArrowDialect] - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
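``rename_file`` expects the target in ``{filesystem}/{path}`` form, optionally carrying its own SAS token after a ``?``. A minimal sketch with hypothetical names::

    # Move the file into an "archive" directory of the same file system.
    renamed_client = file_client.rename_file("my-filesystem/archive/data-renamed.csv")
    print(renamed_client.path_name)  # new path within the file system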
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (DataLakeFileQueryReader) - :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on datalake file data by providing simple query expressions. - """ - query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage") - blob_quick_query_reader = self._blob_client.query_blob(query_expression, - blob_format=kwargs.pop('file_format', None), - error_cls=DataLakeFileQueryError, - **kwargs) - return DataLakeFileQueryReader(blob_quick_query_reader) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_lease.py deleted file mode 100644 index 7ac9a99..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_lease.py +++ /dev/null @@ -1,245 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2020_02_10 import BlobLeaseClient - - -if TYPE_CHECKING: - from datetime import datetime - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(object): - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.FileSystemClient or - ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. 
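Before the lease client below, one more hedged sketch for the ``query_file`` operation removed above. No dialect objects are passed, so the default CSV interpretation applies; the query text follows the form given in the docstring::

    # Project the CSV contents of the file via a quick-query expression.
    reader = file_client.query_file("Select * from DataLakeStorage")
    print(reader.readall())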
- """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
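Tying the lease operations above together: the lease client also works as a context manager, so ``release`` runs on exit. A hedged sketch, reusing the hypothetical ``file_client`` and again assuming the package-level re-export; the lease is passed back into data-plane calls as documented::

    from azure.multiapi.storagev2.filedatalake.v2020_02_10 import DataLakeLeaseClient

    with DataLakeLeaseClient(file_client) as lease:
        lease.acquire(lease_duration=15)  # finite lease: 15-60 seconds, or -1 for infinite
        file_client.append_data(b"guarded", offset=0, length=7, lease=lease)
        file_client.flush_data(offset=7, lease=lease)
        lease.renew()
    # release() is issued automatically when the with-block exits.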
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) - - def _update_lease_client_attributes(self): - self.id = self._blob_lease_client.id # type: str - self.last_modified = self._blob_lease_client.last_modified # type: datetime - self.etag = self._blob_lease_client.etag # type: str diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_service_client.py deleted file mode 100644 index f555fda..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_service_client.py +++ /dev/null @@ -1,494 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Any - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline - -from azure.multiapi.storagev2.blob.v2020_02_10 import BlobServiceClient -from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str -from ._file_system_client import FileSystemClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_file_client import DataLakeFileClient -from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode -from ._serialize import convert_dfs_url_to_blob_url - - -class DataLakeServiceClient(StorageAccountHostsMixin): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - - _, sas_token = parse_query(parsed_url.query) - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs', - credential=self._raw_credential, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - - def __enter__(self): - self._blob_service_client.__enter__() - return self - - def __exit__(self, *args): - self._blob_service_client.close() - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._blob_service_client.close() - - def _format_url(self, hostname): - """Format the endpoint URL according to hostname - """ - formated_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str) - return formated_url - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> DataLakeServiceClient - """ - Create DataLakeServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeServiceClient - :rtype ~azure.storage.filedatalake.DataLakeServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_data_lake_service_client_from_conn_str] - :end-before: [END create_data_lake_service_client_from_conn_str] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from a connection string. - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls(account_url, credential=credential, **kwargs) - - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
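The two construction paths described above, as a hedged sketch with placeholder values::

    from azure.multiapi.storagev2.filedatalake.v2020_02_10 import DataLakeServiceClient

    # From a connection string...
    service = DataLakeServiceClient.from_connection_string(
        "DefaultEndpointsProtocol=https;AccountName=myaccount;"
        "AccountKey=<key>;EndpointSuffix=core.windows.net"
    )

    # ...or from the dfs endpoint plus an explicit credential
    # (SAS token string, account key, AzureSasCredential, or a token credential).
    service = DataLakeServiceClient(
        "https://myaccount.dfs.core.windows.net",
        credential="<sas-token>",
    )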
- :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. - """ - delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool include_deleted: - Specifies that deleted file systems to be returned in the response. This is for file system restore enabled - account. The default value is `False`. - .. versionadded:: 12.3.0 - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. - """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - def _rename_file_system(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str name: - The name of the filesystem to rename. - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = self.get_file_system_client(new_name) - return renamed_file_system - - def undelete_file_system(self, name, deleted_version, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Restores soft-deleted filesystem. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.3.0 - This operation was introduced in API version '2019-12-12'. - - :param str name: - Specifies the name of the deleted filesystem to restore. - :param str deleted_version: - Specifies the version of the deleted filesystem to restore. - :keyword str new_name: - The new name for the deleted filesystem to be restored to. - If not specified "name" will be used as the restored filesystem name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - new_name = kwargs.pop('new_name', None) - file_system = self.get_file_system_client(new_name or name) - self._blob_service_client.undelete_container( - name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access - return file_system - - def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
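A short sketch tying together the file-system operations above, continuing from the ``service`` object in the previous snippet (the file system name and metadata are hypothetical)::

    # Create a new file system, then enumerate what the account holds.
    fs_client = service.create_file_system("my-filesystem", metadata={"env": "dev"})

    for fs in service.list_file_systems(include_metadata=True):
        print(fs.name, fs.metadata)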
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - _configuration=self._config, - _pipeline=_pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. 
- :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake..DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_deserialize.py deleted file mode 100644 index dfcaf8e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_deserialize.py +++ /dev/null @@ -1,150 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. 
All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -from typing import ( # pylint: disable=unused-import - TYPE_CHECKING -) - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \ - ResourceNotFoundError, ResourceExistsError -from ._models import FileProperties, DirectoryProperties, LeaseProperties, PathProperties -from ._shared.models import StorageErrorCode - -if TYPE_CHECKING: - pass - -_LOGGER = logging.getLogger(__name__) - - -def deserialize_dir_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - dir_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return dir_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_path_properties(path_list): - return [PathProperties._from_generated(path) for path in path_list] # pylint: disable=protected-access - - -def from_blob_properties(blob_properties): - file_props = FileProperties() - file_props.name = blob_properties.name - file_props.etag = blob_properties.etag - file_props.deleted = blob_properties.deleted - file_props.metadata = blob_properties.metadata - file_props.lease = blob_properties.lease - file_props.lease.__class__ = LeaseProperties - file_props.last_modified = blob_properties.last_modified - file_props.creation_time = blob_properties.creation_time - file_props.size = blob_properties.size - file_props.deleted_time = blob_properties.deleted_time - file_props.remaining_retention_days = blob_properties.remaining_retention_days - file_props.content_settings = blob_properties.content_settings - return file_props - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = value - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - try: - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - except AttributeError: - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body: - if info == 'code': - error_code = error_body[info] - elif info == 'message': - error_message = error_body[info] - else: - additional_data[info] = error_body[info] - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met]: - raise_error = ResourceModifiedError 
- if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.invalid_property_name, - StorageErrorCode.invalid_source_uri, - StorageErrorCode.source_path_not_found, - StorageErrorCode.lease_name_mismatch, - StorageErrorCode.file_system_not_found, - StorageErrorCode.path_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.invalid_destination_path, - StorageErrorCode.invalid_rename_source_path, - StorageErrorCode.lease_is_already_broken, - StorageErrorCode.invalid_source_or_destination_resource_type, - StorageErrorCode.rename_destination_parent_path_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.source_path_is_being_deleted, - StorageErrorCode.path_already_exists, - StorageErrorCode.destination_path_is_being_deleted, - StorageErrorCode.file_system_already_exists, - StorageErrorCode.file_system_being_deleted, - StorageErrorCode.path_conflict]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response, - continuation_token=storage_error.continuation_token) - error.error_code = error_code - error.additional_info = additional_data - raise error diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py deleted file mode 100644 index 61716d3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py +++ /dev/null @@ -1,59 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Iterator - -from ._deserialize import from_blob_properties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - """ - return self._downloader.chunks() - - def readall(self): - """Download the contents of this file. 
- - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return self._downloader.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py deleted file mode 100644 index c0060a5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py +++ /dev/null @@ -1,829 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Optional, Any - -try: - from urllib.parse import urlparse, quote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote # type: ignore - -import six -from azure.core.pipeline import Pipeline -from azure.core.paging import ItemPaged -from azure.multiapi.storagev2.blob.v2020_02_10 import ContainerClient -from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str -from ._serialize import convert_dfs_url_to_blob_url -from ._models import LocationMode, FileSystemProperties, PublicAccess, FileProperties, DirectoryProperties -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_lease import DataLakeLeaseClient -from ._generated import AzureDataLakeStorageRESTAPI -from ._deserialize import deserialize_path_properties - - -class FileSystemClient(StorageAccountHostsMixin): - """A client to interact with a specific file system, even if that file system - may not yet exist. - - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not file_system_name: - raise ValueError("Please specify a file system name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - # TODO: add self.account_url to base_client and remove _blob_account_url - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._container_client = ContainerClient(blob_account_url, file_system_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline) - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - self._query_str) - - def __exit__(self, *args): - self._container_client.close() - super(FileSystemClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._container_client.close() - self.__exit__() - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> FileSystemClient - """ - Create FileSystemClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. 
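        A minimal usage sketch (the connection string and file system name below are
        placeholders, and ``FileSystemClient`` is assumed to be re-exported from the
        package root, as in ``azure.storage.filedatalake``)::

            from azure.multiapi.storagev2.filedatalake.v2020_02_10 import FileSystemClient

            # Build the client straight from an account connection string.
            file_system_client = FileSystemClient.from_connection_string(
                "<my-connection-string>", file_system_name="myfilesystem")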
- :return a FileSystemClient - :rtype ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_connection_string] - :end-before: [END create_file_system_client_from_connection_string] - :language: python - :dedent: 8 - :caption: Create FileSystemClient from connection string - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, credential=credential, **kwargs) - - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. 
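        As a brief sketch (reusing the ``file_system_client`` constructed in the earlier
        example; the metadata values are arbitrary), a file system can be created and
        its existence verified with::

            # Creates the file system; raises ResourceExistsError if it already exists.
            file_system_client.create_file_system(metadata={'Category': 'test'})
            assert file_system_client.exists()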
- - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 12 - :caption: Creating a file system in the datalake service. - """ - return self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file system exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._container_client.exists(**kwargs) - - def _rename_file_system(self, new_name, **kwargs): - # type: (str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access - #TODO: self._raw_credential would not work with SAS tokens - renamed_file_system = FileSystemClient( - "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - return renamed_file_system - - def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 12 - :caption: Deleting a file system in the datalake service. - """ - self._container_client.delete_container(**kwargs) - - def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the file system. - """ - container_properties = self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the file system. - """ - return self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File System-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, the operation only succeeds if the - file system's lease is active and matches this ID. 
- :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_policy = self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> ItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 8 - :caption: List the paths in the file system. - """ - timeout = kwargs.pop('timeout', None) - return self._client.file_system.list_paths( - recursive=recursive, - max_results=max_results, - path=path, - timeout=timeout, - cls=deserialize_path_properties, - **kwargs) - - def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. 
- When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.delete_directory(**kwargs) - return directory_client - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 8 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 8 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - file_client.delete_file(**kwargs) - return file_client - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. 
- :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.get('name') - except AttributeError: - directory_name = str(directory) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake..DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. 
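        For example (a sketch only; the path segments are placeholders, and neither the
        directory nor the file needs to exist yet)::

            # file_system_client is an existing FileSystemClient instance,
            # e.g. one constructed as sketched earlier.
            directory_client = file_system_client.get_directory_client("my-directory")
            file_client = file_system_client.get_file_client(
                "my-directory/my-subdirectory/my-file")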
- """ - try: - file_path = file_path.get('name') - except AttributeError: - file_path = str(file_path) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/__init__.py deleted file mode 100644 index 5cd3ae2..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI -__all__ = ['AzureDataLakeStorageRESTAPI'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_azure_data_lake_storage_restapi.py deleted file mode 100644 index efb21f3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_azure_data_lake_storage_restapi.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -from ._configuration import AzureDataLakeStorageRESTAPIConfiguration -from .operations import ServiceOperations -from .operations import FileSystemOperations -from .operations import PathOperations -from . import models - - -class AzureDataLakeStorageRESTAPI(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.filedatalake.operations.ServiceOperations - :ivar file_system: FileSystemOperations operations - :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations - :ivar path: PathOperations operations - :vartype path: azure.storage.filedatalake.operations.PathOperations - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - base_url = '{url}' - self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( - self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureDataLakeStorageRESTAPI - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_configuration.py deleted file mode 100644 index e3dd7f1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_configuration.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureDataLakeStorageRESTAPIConfiguration(Configuration): - """Configuration for AzureDataLakeStorageRESTAPI. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs) - - self.url = url - self.resource = "filesystem" - self.version = "2020-02-10" - kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_data_lake_storage_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_data_lake_storage_client.py deleted file mode 100644 index ae9969b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/_data_lake_storage_client.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import PipelineClient -from msrest import Serializer, Deserializer - -from ._configuration import DataLakeStorageClientConfiguration -from azure.core.exceptions import map_error -from .operations import ServiceOperations -from .operations import FileSystemOperations -from .operations import PathOperations -from . import models - - -class DataLakeStorageClient(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. - - - :ivar service: Service operations - :vartype service: azure.storage.filedatalake.operations.ServiceOperations - :ivar file_system: FileSystem operations - :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations - :ivar path: Path operations - :vartype path: azure.storage.filedatalake.operations.PathOperations - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :param file_system: The filesystem identifier. - :type file_system: str - :param path1: The file or directory path. 
- :type path1: str - """ - - def __init__(self, url, file_system, path1, **kwargs): - - base_url = '{url}' - self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2020-02-10' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( - self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - self._client.close() - def __enter__(self): - self._client.__enter__() - return self - def __exit__(self, *exc_details): - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/__init__.py deleted file mode 100644 index 24daed3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI -__all__ = ['AzureDataLakeStorageRESTAPI'] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_azure_data_lake_storage_restapi.py deleted file mode 100644 index 662a749..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_azure_data_lake_storage_restapi.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from msrest import Deserializer, Serializer - -from ._configuration import AzureDataLakeStorageRESTAPIConfiguration -from .operations import ServiceOperations -from .operations import FileSystemOperations -from .operations import PathOperations -from .. import models - - -class AzureDataLakeStorageRESTAPI(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations - :ivar file_system: FileSystemOperations operations - :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations - :ivar path: PathOperations operations - :vartype path: azure.storage.filedatalake.aio.operations.PathOperations - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( - self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureDataLakeStorageRESTAPI": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration.py deleted file mode 100644 index e23526f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureDataLakeStorageRESTAPIConfiguration(Configuration): - """Configuration for AzureDataLakeStorageRESTAPI. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. 
- :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs) - - self.url = url - self.resource = "filesystem" - self.version = "2020-02-10" - kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration_async.py deleted file mode 100644 index 3fcd104..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_configuration_async.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from ..version import VERSION - - -class DataLakeStorageClientConfiguration(Configuration): - """Configuration for DataLakeStorageClient - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the - targe of the desired operation. - :type url: str - :param file_system: The filesystem identifier. - :type file_system: str - :param path1: The file or directory path. - :type path1: str - :ivar resource: The value must be "filesystem" for all filesystem - operations. - :type resource: str - :ivar version: Specifies the version of the operation to use for this - request. 
- :type version: str - """ - - def __init__(self, url, file_system, path1, **kwargs): - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(DataLakeStorageClientConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-datalakestorageclient/{}'.format(VERSION)) - self.generate_client_request_id = True - self.accept_language = None - - self.url = url - self.file_system = file_system - self.path1 = path1 - self.resource = "filesystem" - self.version = "2020-02-10" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/__init__.py deleted file mode 100644 index 0db71e0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_file_system_operations.py deleted file mode 100644 index b6732e3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_file_system_operations.py +++ /dev/null @@ -1,514 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -import datetime -from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar -import warnings - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class FileSystemOperations: - """FileSystemOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - properties: Optional[str] = None, - **kwargs - ) -> None: - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the - operation fails. This operation does not support conditional HTTP requests. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. 
- :type properties: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}'} # type: ignore - - async def set_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - properties: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional HTTP requests. For - more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - async def get_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs - ) -> None: - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the response headers. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - async def delete( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same - identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, - attempts to create a filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information indicating that the - filesystem is being deleted. All other operations, including operations on any files or - directories within the filesystem, will fail with status code 404 (Not Found) while the - filesystem is being deleted. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}'} # type: ignore - - def list_paths( - self, - recursive: bool, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - continuation: Optional[str] = None, - path: Optional[str] = None, - max_results: Optional[int] = None, - upn: Optional[bool] = None, - **kwargs - ) -> AsyncIterable["_models.PathList"]: - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required. 
- :type recursive: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param path: Optional. Filters results to paths within the specified directory. An error - occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either PathList or the result of cls(response) - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.PathList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # TODO: change this once continuation/next_link autorest PR is merged - def prepare_request(next_link=None, cont_token=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_paths.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - # TODO: change this once continuation/next_link autorest PR is merged - if cont_token is not None: - 
query_parameters['continuation'] = self._serialize.query("continuation", cont_token, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - async def extract_data(pipeline_response): - # TODO: change this once continuation/next_link autorest PR is merged - try: - cont_token = pipeline_response.http_response.headers['x-ms-continuation'] - except KeyError: - cont_token = None - deserialized = self._deserialize('PathList', pipeline_response) - list_of_elem = deserialized.paths - if cls: - list_of_elem = cls(list_of_elem) - return cont_token, AsyncList(list_of_elem) - - # TODO: change this once continuation/next_link autorest PR is merged - async def get_next(cont_token=None): - cont_token = cont_token if not continuation else continuation - request = prepare_request(cont_token=cont_token) - - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged( - get_next, extract_data - ) - list_paths.metadata = {'url': '/{filesystem}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_path_operations.py deleted file mode 100644 index b090bc1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_path_operations.py +++ /dev/null @@ -1,1697 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... 
import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class PathOperations: - """PathOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - resource: Optional[Union[str, "_models.PathResourceType"]] = None, - continuation: Optional[str] = None, - mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - rename_source: Optional[str] = None, - source_lease_id: Optional[str] = None, - properties: Optional[str] = None, - permissions: Optional[str] = None, - umask: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is overwritten and if the - destination already exists and has a lease the lease is broken. This operation supports - conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob - Service Operations `_. To fail if the destination already exists, - use a conditional request with If-None-Match: "*". - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param resource: Required only for Create File and Create Directory. The value must be "file" - or "directory". - :type resource: str or ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This parameter determines the - behavior of the rename operation. The value must be "legacy" or "posix", and the default value - will be "posix". 
- :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. The value must have the - following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties - will overwrite the existing properties; otherwise, the existing properties will be preserved. - This value must be a URL percent-encoded string. Note that the string may only contain ASCII - characters in the ISO-8859-1 character set. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, the umask - restricts the permissions of the file or directory to be created. The resulting permission is - given by p bitwise and not u, where p is the permission and u is the umask. For example, if p - is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 - for a directory and 0666 for a file. The default umask is 0027. The umask must be specified - in 4-digit octal notation (e.g. 0766). - :type umask: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_encoding = None - _content_language = None - _content_disposition = None - _content_type = None - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - _content_disposition = path_http_headers.content_disposition - _content_type = path_http_headers.content_type - if source_modified_access_conditions is not None: - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if resource is not None: - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = 
self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - 
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def update( - self, - action: Union[str, "_models.PathUpdateAction"], - mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], - body: IO, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - max_records: Optional[int] = None, - continuation: Optional[str] = None, - force_flag: Optional[bool] = None, - position: Optional[int] = None, - retain_uncommitted_data: Optional[bool] = None, - close: Optional[bool] = None, - content_length: Optional[int] = None, - properties: Optional[str] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - acl: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> Optional["_models.SetAccessControlRecursiveResponse"]: - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, - sets properties for a file or directory, or sets access control for a file or directory. Data - can only be appended to a file. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param action: The action must be "append" to upload data to be appended to a file, "flush" to - flush previously uploaded data to a file, "setProperties" to set the properties of a file or - directory, "setAccessControl" to set the owner, group, permissions, or access control list for - a file or directory, or "setAccessControlRecursive" to set the access control list for a - directory recursively. Note that Hierarchical Namespace must be enabled for the account in - order to use access control. Also note that the Access Control List (ACL) includes permissions - for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers - are mutually exclusive. - :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param body: Initial data. - :type body: IO - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the - maximum number of files or directories on which the acl change will be applied. If omitted or - greater than 2,000, the request will process up to 2,000 items. - :type max_records: int - :param continuation: Optional. 
The number of paths processed with each invocation is limited. - If the number of paths to be processed exceeds this limit, a continuation token is returned in - the response header x-ms-continuation. When a continuation token is returned in the response, - it must be percent-encoded and specified in a subsequent invocation of setAcessControlRecursive - operation. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. 
All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/json" - - # Construct URL - url = self.update.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # 
type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not 
None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - deserialized = None - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if response.status_code == 202: - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def lease( - self, - x_ms_lease_action: Union[str, "_models.PathLeaseAction"], - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - x_ms_lease_duration: Optional[int] = None, - x_ms_lease_break_period: Optional[int] = None, - proposed_lease_id: Optional[str] 
= None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Lease Path. - - Create and manage a lease to restrict write and delete access to the path. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations `_. - - :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", - and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" - to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the - lease break period is allowed to elapse, during which time no lease operation except break and - release can be performed on the file. When a lease is successfully broken, the response - indicates the interval in seconds until a new lease can be acquired. Use "change" and specify - the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to - change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. - :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies - the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or - -1 for infinite lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, - and specifies the break period of the lease in seconds. The lease break duration must be - between 0 and 60 seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 201: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 202: - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def read( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - x_ms_range_get_content_md5: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> IO: - """Read File. - - Read the contents of a file. For read operations, range requests are supported. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: The HTTP Range request header specifies one or more byte ranges of the resource - to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified - together with the Range header, the service returns the MD5 hash for the range, as long as the - range is less than or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). 
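# Hedged usage sketch for the lease() operation shown above (not part of the
# generated code being removed). It assumes `path_ops` is an already-configured
# instance of this PathOperations class whose URL points at an existing file;
# that name and the helper function are hypothetical, the keyword arguments are
# the ones documented in the docstring above.
import uuid

async def acquire_then_break_lease(path_ops):
    # "acquire": propose a lease ID and a duration (15-60 seconds, or -1 for infinite).
    proposed = str(uuid.uuid4())
    await path_ops.lease(
        x_ms_lease_action="acquire",
        x_ms_lease_duration=-1,
        proposed_lease_id=proposed,
    )
    # "break": optionally bound the break period (0-60 seconds); no lease ID is needed.
    await path_ops.lease(
        x_ms_lease_action="break",
        x_ms_lease_break_period=0,
    )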
If this header is set to true when - the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.read.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - action: Optional[Union[str, "_models.PathGetPropertiesAction"]] = None, - upn: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a path. Get Status returns - all system defined properties for a path. Get Access Control List returns the access control - list for a path. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param action: Optional. If the value is "getStatus" only the system defined properties for the - path are returned. If the value is "getAccessControl" the access control list is returned in - the response headers (Hierarchical Namespace must be enabled for the account), otherwise the - properties are returned. - :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
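# Hedged sketch of a ranged read() call as described above, assuming `path_ops`
# is a configured PathOperations instance for an existing file (a hypothetical
# name, not part of the deleted module). The MD5-for-range request is only
# honoured for ranges of 4 MiB or less, per the docstring.
async def read_first_kib(path_ops) -> bytes:
    stream = await path_ops.read(
        range="bytes=0-1023",
        x_ms_range_get_content_md5=True,
    )
    chunks = []
    async for chunk in stream:  # read() returns the streamed response body
        chunks.append(chunk)
    return b"".join(chunks)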
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'str') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - 
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def delete( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - recursive: Optional[bool] = None, - continuation: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param recursive: Required. - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. 
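# Hedged sketch for get_properties() above: the operation returns None, so the
# access control list is surfaced through the `cls` callback (the same callback
# mechanism the generated code pops from **kwargs). `path_ops` and `capture`
# are hypothetical names used only for illustration.
async def get_acl(path_ops):
    def capture(pipeline_response, deserialized, headers):
        return headers  # hand back the deserialized response headers
    headers = await path_ops.get_properties(
        action="getAccessControl",
        upn=True,  # translate AAD object IDs to user principal names where possible
        cls=capture,
    )
    return headers.get("x-ms-acl"), headers.get("x-ms-owner"), headers.get("x-ms-group")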
When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
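# Hedged sketch of the continuation pattern the delete() docstring above
# describes: keep re-issuing the delete with the returned token until the
# service stops sending an x-ms-continuation header. `path_ops` and `capture`
# are hypothetical; the keyword arguments match the operation's signature.
async def delete_directory(path_ops):
    def capture(pipeline_response, deserialized, headers):
        return headers
    continuation = None
    while True:
        headers = await path_ops.delete(
            recursive=True,
            continuation=continuation,
            cls=capture,
        )
        continuation = headers.get("x-ms-continuation")
        if not continuation:
            break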
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/json" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control_recursive( - self, - mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], - timeout: Optional[int] = None, - continuation: Optional[str] = None, - force_flag: Optional[bool] = None, - max_records: Optional[int] = None, - acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.SetAccessControlRecursiveResponse": - """Set the access control list for a path and subpaths. - - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files or directories on which - the acl change will be applied. If omitted or greater than 2,000, the request will process up - to 2,000 items. - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
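# Hedged sketch for set_access_control() above, assuming a Hierarchical
# Namespace account and a configured PathOperations instance named `path_ops`
# (hypothetical). Only the ACL is set here; owner/group/permissions are
# separate optional headers per the docstring.
async def restrict_to_owner(path_ops):
    await path_ops.set_access_control(
        # Comma-separated ACEs in "[scope:][type]:[id]:[permissions]" form.
        acl="user::rwx,group::---,other::---",
    )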
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - action = "setAccessControlRecursive" - accept = "application/json" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def flush_data( - self, - timeout: Optional[int] = None, - position: Optional[int] = 
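# Hedged sketch for set_access_control_recursive() above, assuming `path_ops`
# targets a directory (hypothetical name). A single call is shown; if the
# directory holds more entries than one invocation covers, the continuation
# token comes back in the x-ms-continuation response header and can be passed
# to a follow-up call via `continuation=`.
async def grant_group_read_recursive(path_ops):
    result = await path_ops.set_access_control_recursive(
        mode="modify",          # modify existing ACEs rather than replace them
        acl="group::r-x",
        force_flag=True,        # continue past per-entry 4XX failures
        max_records=2000,
    )
    return result  # SetAccessControlRecursiveResponse with success/failure counts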
None, - retain_uncommitted_data: Optional[bool] = None, - close: Optional[bool] = None, - content_length: Optional[int] = None, - request_id_parameter: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - action = "flush" - accept = "application/json" - - # Construct URL - url = self.flush_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", 
_content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def append_data( - self, - body: IO, - position: Optional[int] = None, - timeout: Optional[int] = None, - content_length: Optional[int] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Append data to the file. - - :param body: Initial data. - :type body: IO - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. 
To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _transactional_content_hash = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _transactional_content_hash = path_http_headers.transactional_content_hash - action = "append" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.append_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_expiry( - self, - expiry_options: Union[str, "_models.PathExpiryOptions"], - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - expires_on: Optional[str] = None, - **kwargs - ) -> None: - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/json" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_service_operations.py deleted file mode 100644 index b229c12..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,148 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar -import warnings - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def list_file_systems( - self, - prefix: Optional[str] = None, - continuation: Optional[str] = None, - max_results: Optional[int] = None, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs - ) -> AsyncIterable["_models.FileSystemList"]: - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either FileSystemList or the result of cls(response) - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystemList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - resource = "account" - accept = "application/json" - - def prepare_request(next_link=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_file_systems.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - async def extract_data(pipeline_response): - deserialized = self._deserialize('FileSystemList', pipeline_response) - list_of_elem = deserialized.filesystems - if cls: - list_of_elem = cls(list_of_elem) - return None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged( - get_next, extract_data - ) - list_file_systems.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/__init__.py deleted file mode 100644 index 
1190e52..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations_async import ServiceOperations -from ._file_system_operations_async import FileSystemOperations -from ._path_operations_async import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_file_system_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_file_system_operations_async.py deleted file mode 100644 index f1af068..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_file_system_operations_async.py +++ /dev/null @@ -1,462 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class FileSystemOperations: - """FileSystemOperations async operations. - - You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - - async def create(self, properties=None, request_id=None, timeout=None, *, cls=None, **kwargs): - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem - already exists, the operation fails. This operation does not support - conditional HTTP requests. - - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. 
- :type properties: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}'} - - async def set_properties(self, properties=None, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional - HTTP requests. For more information, see [Specifying Conditional - Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. 
If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 
'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{filesystem}'} - - async def get_properties(self, request_id=None, timeout=None, *, cls=None, **kwargs): - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the - response headers. - - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-namespace-enabled': self._deserialize('str', response.headers.get('x-ms-namespace-enabled')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{filesystem}'} - - async def delete(self, request_id=None, timeout=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a - FileSystem with the same identifier cannot be created for at least 30 - seconds. 
While the filesystem is being deleted, attempts to create a - filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information - indicating that the filesystem is being deleted. All other operations, - including operations on any files or directories within the filesystem, - will fail with status code 404 (Not Found) while the filesystem is - being deleted. This operation supports conditional HTTP requests. For - more information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}'} - - async def list_paths(self, recursive, continuation=None, path=None, max_results=None, upn=None, request_id=None, timeout=None, *, cls=None, **kwargs): - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param path: Optional. Filters results to paths within the specified - directory. An error occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum - number of items to return. If omitted or greater than 5,000, the - response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: PathList or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.PathList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.list_paths.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('PathList', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_paths.metadata = {'url': '/{filesystem}'} diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_path_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_path_operations_async.py deleted file mode 100644 index 28f0999..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_path_operations_async.py +++ /dev/null @@ -1,1698 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) 
Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class PathOperations: - """PathOperations async operations. - - You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar comp: . Constant value: "expiry". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.comp = "expiry" - - async def create(self, resource=None, continuation=None, mode=None, rename_source=None, source_lease_id=None, properties=None, permissions=None, umask=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is - overwritten and if the destination already exists and has a lease the - lease is broken. This operation supports conditional HTTP requests. - For more information, see [Specifying Conditional Headers for Blob - Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - To fail if the destination already exists, use a conditional request - with If-None-Match: "*". - - :param resource: Required only for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource: str or - ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This - parameter determines the behavior of the rename operation. The value - must be "legacy" or "posix", and the default value will be "posix". - Possible values include: 'legacy', 'posix' - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. - The value must have the following format: "/{filesystem}/{path}". If - "x-ms-properties" is specified, the properties will overwrite the - existing properties; otherwise, the existing properties will be - preserved. This value must be a URL percent-encoded string. Note that - the string may only contain ASCII characters in the ISO-8859-1 - character set. 
- :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must match. - :type source_lease_id: str - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is - enabled for the account. When creating a file or directory and the - parent folder does not have a default ACL, the umask restricts the - permissions of the file or directory to be created. The resulting - permission is given by p bitwise and not u, where p is the permission - and u is the umask. For example, if p is 0777 and u is 0057, then the - resulting permission is 0720. The default permission is 0777 for a - directory and 0666 for a file. The default umask is 0027. The umask - must be specified in 4-digit octal notation (e.g. 0766). - :type umask: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - source_if_match = None - if source_modified_access_conditions is not None: - source_if_match = source_modified_access_conditions.source_if_match - source_if_none_match = None - if source_modified_access_conditions is not None: - source_if_none_match = source_modified_access_conditions.source_if_none_match - source_if_modified_since = None - if source_modified_access_conditions is not None: - source_if_modified_since = source_modified_access_conditions.source_if_modified_since - source_if_unmodified_since = None - if source_modified_access_conditions is not None: - source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if resource is not None: - query_parameters['resource'] = self._serialize.query("resource", resource, 'PathResourceType') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathRenameMode') - if timeout is not None: - query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", source_if_none_match, 'str') - if source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - 
response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{filesystem}/{path}'} - - async def update(self, action, mode, body, max_records=None, continuation=None, force_flag=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously - uploaded data to a file, sets properties for a file or directory, or - sets access control for a file or directory. Data can only be appended - to a file. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param action: The action must be "append" to upload data to be - appended to a file, "flush" to flush previously uploaded data to a - file, "setProperties" to set the properties of a file or directory, - "setAccessControl" to set the owner, group, permissions, or access - control list for a file or directory, or "setAccessControlRecursive" - to set the access control list for a directory recursively. Note that - Hierarchical Namespace must be enabled for the account in order to use - access control. Also note that the Access Control List (ACL) includes - permissions for the owner, owning group, and others, so the - x-ms-permissions and x-ms-acl request headers are mutually exclusive. - Possible values include: 'append', 'flush', 'setProperties', - 'setAccessControl', 'setAccessControlRecursive' - :type action: str or - ~azure.storage.filedatalake.models.PathUpdateAction - :param mode: Mode "set" sets POSIX access control rights on files and - directories, "modify" modifies one or more POSIX access control rights - that pre-exist on files and directories, "remove" removes one or more - POSIX access control rights that were present earlier on files and - directories. Possible values include: 'set', 'modify', 'remove' - :type mode: str or - ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param body: Initial data - :type body: Generator - :param max_records: Optional. Valid for "SetAccessControlRecursive" - operation. It specifies the maximum number of files or directories on - which the acl change will be applied. If omitted or greater than - 2,000, the request will process up to 2,000 items - :type max_records: int - :param continuation: Optional. The number of paths processed with each - invocation is limited. 
If the number of paths to be processed exceeds - this limit, a continuation token is returned in the response header - x-ms-continuation. When a continuation token is returned in the - response, it must be percent-encoded and specified in a subsequent - invocation of setAccessControlRecursive operation. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" - operation. If set to false, the operation will terminate quickly on - encountering user errors (4XX). If true, the operation will ignore - user errors and proceed with the operation on other sub-entities of - the directory. Continuation token will only be returned when forceFlag - is true in case of user errors. If not set, the default value is false. - :type force_flag: bool - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. - It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed. - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param properties: Optional. User-defined properties to be stored with - the filesystem, in the format of a comma-separated list of name and - value pairs "n1=v1, n2=v2, ...", where each value is a base64 encoded - string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties - not included in the list will be removed. All properties are removed - if the header is omitted. 
To merge new and existing properties, first - get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all - properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: SetAccessControlRecursiveResponse or the result of - cls(response) - :rtype: - ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - content_md5 = None - if path_http_headers is not None: - content_md5 = path_http_headers.content_md5 - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since 
= modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.update.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/octet-stream' - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - if cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: 
- header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} - - async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Lease Path. - - Create and manage a lease to restrict write and delete access to the - path. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param x_ms_lease_action: There are five lease actions: "acquire", - "break", "change", "renew", and "release". 
Use "acquire" and specify - the "x-ms-proposed-lease-id" and "x-ms-lease-duration" to acquire a - new lease. Use "break" to break an existing lease. When a lease is - broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the - file. When a lease is successfully broken, the response indicates the - interval in seconds until a new lease can be acquired. Use "change" - and specify the current lease ID in "x-ms-lease-id" and the new lease - ID in "x-ms-proposed-lease-id" to change the lease ID of an active - lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to - release a lease. Possible values include: 'acquire', 'break', - 'change', 'renew', 'release' - :type x_ms_lease_action: str or - ~azure.storage.filedatalake.models.PathLeaseAction - :param x_ms_lease_duration: The lease duration is required to acquire - a lease, and specifies the duration of the lease in seconds. The - lease duration must be between 15 and 60 seconds or -1 for infinite - lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is - optional to break a lease, and specifies the break period of the - lease in seconds. The lease break duration must be between 0 and 60 - seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The Blob service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'PathLeaseAction') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-lease-time': self._deserialize('str', response.headers.get('x-ms-lease-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - lease.metadata = {'url': '/{filesystem}/{path}'} - - async def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Read File. - - Read the contents of a file. For read operations, range requests are - supported. This operation supports conditional HTTP requests. For more - information, see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param range: The HTTP Range request header specifies one or more byte - ranges of the resource to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set - to "true" and specified together with the Range header, the service - returns the MD5 hash for the range, as long as the range is less than - or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this - header is set to true when the range exceeds 4 MB in size, the service - returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
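The read operation returns a download stream, and the Range / x-ms-range-get-content-md5 headers described above shape what comes back. A minimal sketch under the same assumptions (path_ops is the async PathOperations instance; the helper name is hypothetical):

    async def read_first_kilobyte(path_ops):
        # Download only bytes 0-1023 and ask the service to include an MD5 for the range.
        stream = await path_ops.read(range="bytes=0-1023", x_ms_range_get_content_md5=True)
        chunks = []
        async for chunk in stream:  # the returned object is an async iterator of byte chunks
            chunks.append(chunk)
        return b"".join(chunks)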
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.read.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = 
response.stream_download(self._client._pipeline) - header_dict = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), - 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 
'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), - 'x-ms-content-md5': self._deserialize('str', response.headers.get('x-ms-content-md5')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} - - async def get_properties(self, action=None, upn=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a - path. Get Status returns all system defined properties for a path. Get - Access Control List returns the access control list for a path. This - operation supports conditional HTTP requests. For more information, - see [Specifying Conditional Headers for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param action: Optional. If the value is "getStatus" only the system - defined properties for the path are returned. If the value is - "getAccessControl" the access control list is returned in the response - headers (Hierarchical Namespace must be enabled for the account), - otherwise the properties are returned. Possible values include: - 'getAccessControl', 'getStatus' - :type action: str or - ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
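Because get_properties surfaces its results only in response headers, a cls callback is the way to read them. A hedged sketch (path_ops as above; the callback signature mirrors the cls(response, deserialized, response_headers) call in the code being removed):

    async def get_path_acl(path_ops):
        captured = {}

        def capture(response, deserialized, response_headers):
            captured.update(response_headers)

        # "getAccessControl" returns the ACL in x-ms-acl; upn=True resolves object IDs to UPNs.
        await path_ops.get_properties(action="getAccessControl", upn=True, cls=capture)
        return captured.get("x-ms-acl")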
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'PathGetPropertiesAction') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-resource-type': self._deserialize('str', response.headers.get('x-ms-resource-type')), - 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), - 'x-ms-owner': self._deserialize('str', response.headers.get('x-ms-owner')), - 'x-ms-group': self._deserialize('str', response.headers.get('x-ms-group')), - 'x-ms-permissions': self._deserialize('str', response.headers.get('x-ms-permissions')), - 'x-ms-acl': self._deserialize('str', response.headers.get('x-ms-acl')), - 'x-ms-lease-duration': self._deserialize('str', response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize('str', response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize('str', response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{filesystem}/{path}'} - - async def delete(self, recursive=None, continuation=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP - requests. For more information, see [Specifying Conditional Headers - for Blob Service - Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). - - :param recursive: Required - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
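Deleting a large directory is a paged operation driven by the x-ms-continuation header, as the continuation parameter above describes. A sketch under the same assumptions (hypothetical helper name):

    async def delete_directory(path_ops):
        continuation = None
        while True:
            captured = {}

            def capture(response, deserialized, response_headers):
                captured.update(response_headers)

            # recursive=True is required to delete a non-empty directory.
            await path_ops.delete(recursive=True, continuation=continuation, cls=capture)
            continuation = captured.get("x-ms-continuation")
            if not continuation:  # no token means the whole tree has been removed
                break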
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{filesystem}/{path}'} - - async def set_access_control(self, timeout=None, owner=None, group=None, permissions=None, acl=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
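The permissions and acl formats described above can be exercised directly. Two minimal sketches (path_ops as above; object_id stands in for an Azure AD object ID and both helper names are hypothetical):

    async def make_owner_only(path_ops):
        # Symbolic (rwx------) or 4-digit octal (e.g. "0700") permission strings are accepted.
        await path_ops.set_access_control(permissions="rwx------")

    async def grant_user_read(path_ops, object_id):
        # ACEs use the "[scope:][type]:[id]:[permissions]" format described above.
        await path_ops.set_access_control(acl="user:{}:r--".format(object_id))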
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "setAccessControl" - - # Construct URL - url = self.set_access_control.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - return cls(response, None, response_headers) - set_access_control.metadata = {'url': '/{filesystem}/{path}'} - - async def set_access_control_recursive(self, mode, timeout=None, continuation=None, force_flag=None, max_records=None, acl=None, request_id=None, *, cls=None, **kwargs): - """Set the access control list for a path and subpaths. - - :param mode: Mode "set" sets POSIX access control rights on files and - directories, "modify" modifies one or more POSIX access control rights - that pre-exist on files and directories, "remove" removes one or more - POSIX access control rights that were present earlier on files and - directories. Possible values include: 'set', 'modify', 'remove' - :type mode: str or - ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" - operation. If set to false, the operation will terminate quickly on - encountering user errors (4XX). If true, the operation will ignore - user errors and proceed with the operation on other sub-entities of - the directory. Continuation token will only be returned when forceFlag - is true in case of user errors. If not set the default value is false - for this. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files - or directories on which the acl change will be applied. If omitted or - greater than 2,000, the request will process up to 2,000 items - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
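Applied to a whole tree, the recursive variant processes entries in batches and reports progress through a continuation token alongside the SetAccessControlRecursiveResponse body. A sketch under the same assumptions (the cls callback mirrors cls(response, deserialized, header_dict) from the code above):

    async def modify_acl_tree(path_ops, acl):
        continuation = None
        while True:
            captured = {}

            def capture(response, deserialized, header_dict):
                captured["body"] = deserialized
                captured["continuation"] = header_dict.get("x-ms-continuation")

            # "modify" merges the given ACEs; force_flag=True keeps going past per-entry 4XX errors,
            # and max_records caps each batch at the documented 2,000-item limit.
            await path_ops.set_access_control_recursive(
                "modify", acl=acl, force_flag=True, max_records=2000,
                continuation=continuation, cls=capture)
            continuation = captured["continuation"]
            if not continuation:
                break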
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: SetAccessControlRecursiveResponse or the result of - cls(response) - :rtype: - ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - action = "setAccessControlRecursive" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} - - async def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. 
- :type timeout: int - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. - It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
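The position/close rules above reduce to a short call once all data has been appended. A minimal sketch (path_ops as above; the helper name is hypothetical):

    async def commit_file(path_ops, total_length):
        # position must equal the final file length, content_length must be 0 for a flush,
        # and close=True marks this as the final change event for the file stream.
        await path_ops.flush_data(position=total_length, content_length=0, close=True)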
- :type request_id: str - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Additional parameters for the - operation - :type modified_access_conditions: - ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - content_md5 = None - if path_http_headers is not None: - content_md5 = path_http_headers.content_md5 - cache_control = None - if path_http_headers is not None: - cache_control = path_http_headers.cache_control - content_type = None - if path_http_headers is not None: - content_type = path_http_headers.content_type - content_disposition = None - if path_http_headers is not None: - content_disposition = path_http_headers.content_disposition - content_encoding = None - if path_http_headers is not None: - content_encoding = path_http_headers.content_encoding - content_language = None - if path_http_headers is not None: - content_language = path_http_headers.content_language - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - if_match = None - if modified_access_conditions is not None: - if_match = modified_access_conditions.if_match - if_none_match = None - if modified_access_conditions is not None: - if_none_match = modified_access_conditions.if_none_match - if_modified_since = None - if modified_access_conditions is not None: - if_modified_since = modified_access_conditions.if_modified_since - if_unmodified_since = None - if modified_access_conditions is not None: - if_unmodified_since = modified_access_conditions.if_unmodified_since - - action = "flush" - - # Construct URL - url = self.flush_data.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - if 
cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", cache_control, 'str') - if content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", content_type, 'str') - if content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", content_disposition, 'str') - if content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", content_encoding, 'str') - if content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", content_language, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - } - return cls(response, None, response_headers) - flush_data.metadata = {'url': '/{filesystem}/{path}'} - - async def append_data(self, body, position=None, timeout=None, content_length=None, transactional_content_crc64=None, request_id=None, path_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Append data to the file. - - :param body: Initial data - :type body: Generator - :param position: This parameter allows the caller to upload data in - parallel and control the order in which it is appended to the file. - It is required when uploading data to be appended to the file and when - flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not - immediately flushed, or written, to the file. To flush, the - previously uploaded data must be contiguous, the position parameter - must be specified and equal to the length of the file after all data - has been written, and there must not be a request entity body included - with the request. 
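Each append names its offset explicitly and the bytes stay uncommitted until flushed, as the position description above explains. A sketch under the same assumptions (passing the body as a list of byte chunks is an assumption about what the pipeline accepts, not something stated in this file):

    async def upload_small_file(path_ops, data: bytes):
        # Append the whole payload at offset 0 ...
        await path_ops.append_data([data], position=0, content_length=len(data))
        # ... then commit it, as in the commit_file sketch above.
        await path_ops.flush_data(position=len(data), content_length=0, close=True)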
- :type position: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". - Must be 0 for "Flush Data". Must be the length of the request content - in bytes for "Append Data". - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 - for the body, to be validated by the service. - :type transactional_content_crc64: bytearray - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param path_http_headers: Additional parameters for the operation - :type path_http_headers: - ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.filedatalake.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - transactional_content_hash = None - if path_http_headers is not None: - transactional_content_hash = path_http_headers.transactional_content_hash - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - action = "append" - - # Construct URL - url = self.append_data.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", transactional_content_hash, 'bytearray') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - } - return cls(response, None, response_headers) - append_data.metadata = {'url': '/{filesystem}/{path}'} - - async def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, *, cls=None, **kwargs): - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', - 'RelativeToNow', 'Absolute' - :type expiry_options: str or - ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param expires_on: The time to set the blob to expiry - :type expires_on: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.set_expiry.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_expiry.metadata = {'url': '/{filesystem}/{path}'} diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py deleted file mode 100644 index b4cb9c5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py +++ /dev/null @@ -1,128 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar resource: The value must be "account" for all account operations. Constant value: "account". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.resource = "account" - - async def list_file_systems(self, prefix=None, continuation=None, max_results=None, request_id=None, timeout=None, *, cls=None, **kwargs): - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified - prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number - of paths that are deleted with each invocation is limited. If the - number of paths to be deleted exceeds this limit, a continuation token - is returned in this response header. When a continuation token is - returned in the response, it must be specified in a subsequent - invocation of the delete operation to continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum - number of items to return. If omitted or greater than 5,000, the - response will include up to 5,000 items. 
- :type max_results: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for Blob Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: FileSystemList or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.FileSystemList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.list_file_systems.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['resource'] = self._serialize.query("self.resource", self.resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('FileSystemList', response) - header_dict = { - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_file_systems.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/__init__.py deleted file mode 100644 index 769623c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/__init__.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding=utf-8 -# 
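The list_file_systems operation above only surfaces the continuation token when a `cls` callback is supplied (the token arrives in the deserialized `x-ms-continuation` response header). A minimal paging sketch, assuming `service` is an already-constructed async ServiceOperations instance:

async def list_all_file_systems(service):
    continuation = None
    while True:
        # Pass `cls` so the call returns the response headers alongside the body;
        # the header dict carries the x-ms-continuation token for the next page.
        body, headers = await service.list_file_systems(
            continuation=continuation,
            max_results=100,
            cls=lambda raw_response, deserialized, header_dict: (deserialized, header_dict),
        )
        for fs in body.filesystems or []:
            print(fs.name, fs.last_modified, fs.e_tag)
        continuation = headers.get('x-ms-continuation')
        if not continuation:
            break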
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AclFailedEntry - from ._models_py3 import FileSystem - from ._models_py3 import FileSystemList - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import Path - from ._models_py3 import PathHTTPHeaders - from ._models_py3 import PathList - from ._models_py3 import SetAccessControlRecursiveResponse - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError - from ._models_py3 import StorageErrorAutoGenerated -except (SyntaxError, ImportError): - from ._models import AclFailedEntry # type: ignore - from ._models import FileSystem # type: ignore - from ._models import FileSystemList # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ModifiedAccessConditions # type: ignore - from ._models import Path # type: ignore - from ._models import PathHTTPHeaders # type: ignore - from ._models import PathList # type: ignore - from ._models import SetAccessControlRecursiveResponse # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageErrorAutoGenerated # type: ignore - -from ._azure_data_lake_storage_restapi_enums import ( - PathExpiryOptions, - PathGetPropertiesAction, - PathLeaseAction, - PathRenameMode, - PathResourceType, - PathSetAccessControlRecursiveMode, - PathUpdateAction, -) - -__all__ = [ - 'AclFailedEntry', - 'FileSystem', - 'FileSystemList', - 'LeaseAccessConditions', - 'ModifiedAccessConditions', - 'Path', - 'PathHTTPHeaders', - 'PathList', - 'SetAccessControlRecursiveResponse', - 'SourceModifiedAccessConditions', - 'StorageError', - 'StorageErrorAutoGenerated', - 'PathExpiryOptions', - 'PathGetPropertiesAction', - 'PathLeaseAction', - 'PathRenameMode', - 'PathResourceType', - 'PathSetAccessControlRecursiveMode', - 'PathUpdateAction', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_azure_data_lake_storage_restapi_enums.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_azure_data_lake_storage_restapi_enums.py deleted file mode 100644 index e9fe9d7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_azure_data_lake_storage_restapi_enums.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class PathExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NEVER_EXPIRE = "NeverExpire" - RELATIVE_TO_CREATION = "RelativeToCreation" - RELATIVE_TO_NOW = "RelativeToNow" - ABSOLUTE = "Absolute" - -class PathGetPropertiesAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - GET_ACCESS_CONTROL = "getAccessControl" - GET_STATUS = "getStatus" - -class PathLeaseAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - ACQUIRE = "acquire" - BREAK_ENUM = "break" - CHANGE = "change" - RENEW = "renew" - RELEASE = "release" - -class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LEGACY = "legacy" - POSIX = "posix" - -class PathResourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - DIRECTORY = "directory" - FILE = "file" - -class PathSetAccessControlRecursiveMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SET = "set" - MODIFY = "modify" - REMOVE = "remove" - -class PathUpdateAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - APPEND = "append" - FLUSH = "flush" - SET_PROPERTIES = "setProperties" - SET_ACCESS_CONTROL = "setAccessControl" - SET_ACCESS_CONTROL_RECURSIVE = "setAccessControlRecursive" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_data_lake_storage_client_enums.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_data_lake_storage_client_enums.py deleted file mode 100644 index 93d9c27..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_data_lake_storage_client_enums.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
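The _CaseInsensitiveEnumMeta defined above makes enum member lookup case-insensitive while the member values keep the exact casing the service expects (for example, values such as "RelativeToNow" are what set_expiry sends in the x-ms-expiry-option header). A small illustration of the same mechanism, written without the six.with_metaclass compatibility shim used in the deleted file:

from enum import Enum, EnumMeta

class _CaseInsensitiveEnumMeta(EnumMeta):
    def __getitem__(cls, name):
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        try:
            return cls._member_map_[name.upper()]
        except KeyError:
            raise AttributeError(name)

class PathExpiryOptions(str, Enum, metaclass=_CaseInsensitiveEnumMeta):
    NEVER_EXPIRE = "NeverExpire"
    RELATIVE_TO_CREATION = "RelativeToCreation"
    RELATIVE_TO_NOW = "RelativeToNow"
    ABSOLUTE = "Absolute"

# Member lookup ignores case; the wire value keeps the service's casing.
assert PathExpiryOptions["relative_to_now"] is PathExpiryOptions.RELATIVE_TO_NOW
assert PathExpiryOptions.never_expire == "NeverExpire"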
-# -------------------------------------------------------------------------- - -from enum import Enum - - -class PathSetAccessControlRecursiveMode(str, Enum): - - set = "set" - modify = "modify" - remove = "remove" - - -class PathExpiryOptions(str, Enum): - - never_expire = "NeverExpire" - relative_to_creation = "RelativeToCreation" - relative_to_now = "RelativeToNow" - absolute = "Absolute" - - -class PathResourceType(str, Enum): - - directory = "directory" - file = "file" - - -class PathRenameMode(str, Enum): - - legacy = "legacy" - posix = "posix" - - -class PathUpdateAction(str, Enum): - - append = "append" - flush = "flush" - set_properties = "setProperties" - set_access_control = "setAccessControl" - set_access_control_recursive = "setAccessControlRecursive" - - -class PathLeaseAction(str, Enum): - - acquire = "acquire" - break_enum = "break" - change = "change" - renew = "renew" - release = "release" - - -class PathGetPropertiesAction(str, Enum): - - get_access_control = "getAccessControl" - get_status = "getStatus" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models.py deleted file mode 100644 index 099e37c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models.py +++ /dev/null @@ -1,357 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AclFailedEntry(msrest.serialization.Model): - """AclFailedEntry. - - :param name: - :type name: str - :param type: - :type type: str - :param error_message: - :type error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AclFailedEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) - self.error_message = kwargs.get('error_message', None) - - -class FileSystem(msrest.serialization.Model): - """FileSystem. - - :param name: - :type name: str - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(FileSystem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - - -class FileSystemList(msrest.serialization.Model): - """FileSystemList. 
- - :param filesystems: - :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__( - self, - **kwargs - ): - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = kwargs.get('filesystems', None) - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - - -class Path(msrest.serialization.Model): - """Path. - - :param name: - :type name: str - :param is_directory: - :type is_directory: bool - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - :param content_length: - :type content_length: long - :param owner: - :type owner: str - :param group: - :type group: str - :param permissions: - :type permissions: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Path, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.is_directory = kwargs.get('is_directory', False) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - self.content_length = kwargs.get('content_length', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - - -class PathHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Optional. 
Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :type cache_control: str - :param content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type content_encoding: str - :param content_language: Optional. Set the blob's content language. If specified, this property - is stored with the blob and returned with a read request. - :type content_language: str - :param content_disposition: Optional. Sets the blob's Content-Disposition header. - :type content_disposition: str - :param content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :type content_type: str - :param content_md5: Specify the transactional md5 for the body, to be validated by the service. - :type content_md5: bytearray - :param transactional_content_hash: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, - } - - def __init__( - self, - **kwargs - ): - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.content_type = kwargs.get('content_type', None) - self.content_md5 = kwargs.get('content_md5', None) - self.transactional_content_hash = kwargs.get('transactional_content_hash', None) - - -class PathList(msrest.serialization.Model): - """PathList. - - :param paths: - :type paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__( - self, - **kwargs - ): - super(PathList, self).__init__(**kwargs) - self.paths = kwargs.get('paths', None) - - -class SetAccessControlRecursiveResponse(msrest.serialization.Model): - """SetAccessControlRecursiveResponse. 
- - :param directories_successful: - :type directories_successful: int - :param files_successful: - :type files_successful: int - :param failure_count: - :type failure_count: int - :param failed_entries: - :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__( - self, - **kwargs - ): - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = kwargs.get('directories_successful', None) - self.files_successful = kwargs.get('files_successful', None) - self.failure_count = kwargs.get('failure_count', None) - self.failed_entries = kwargs.get('failed_entries', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - """ - - _attribute_map = { - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param error: The service error response object. - :type error: ~azure.storage.filedatalake.models.StorageErrorAutoGenerated - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorAutoGenerated'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class StorageErrorAutoGenerated(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. 
- :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageErrorAutoGenerated, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models_py3.py deleted file mode 100644 index 337388d..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/models/_models_py3.py +++ /dev/null @@ -1,411 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import List, Optional - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AclFailedEntry(msrest.serialization.Model): - """AclFailedEntry. - - :param name: - :type name: str - :param type: - :type type: str - :param error_message: - :type error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - type: Optional[str] = None, - error_message: Optional[str] = None, - **kwargs - ): - super(AclFailedEntry, self).__init__(**kwargs) - self.name = name - self.type = type - self.error_message = error_message - - -class FileSystem(msrest.serialization.Model): - """FileSystem. - - :param name: - :type name: str - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - last_modified: Optional[str] = None, - e_tag: Optional[str] = None, - **kwargs - ): - super(FileSystem, self).__init__(**kwargs) - self.name = name - self.last_modified = last_modified - self.e_tag = e_tag - - -class FileSystemList(msrest.serialization.Model): - """FileSystemList. - - :param filesystems: - :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__( - self, - *, - filesystems: Optional[List["FileSystem"]] = None, - **kwargs - ): - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = filesystems - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. 
- :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - } - - def __init__( - self, - *, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - - -class Path(msrest.serialization.Model): - """Path. - - :param name: - :type name: str - :param is_directory: - :type is_directory: bool - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - :param content_length: - :type content_length: long - :param owner: - :type owner: str - :param group: - :type group: str - :param permissions: - :type permissions: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - is_directory: Optional[bool] = False, - last_modified: Optional[str] = None, - e_tag: Optional[str] = None, - content_length: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - **kwargs - ): - super(Path, self).__init__(**kwargs) - self.name = name - self.is_directory = is_directory - self.last_modified = last_modified - self.e_tag = e_tag - self.content_length = content_length - self.owner = owner - self.group = group - self.permissions = permissions - - -class PathHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :type cache_control: str - :param content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. 
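PathHTTPHeaders is how per-request blob headers and transactional checksums reach operations such as append_data earlier in this diff, where transactional_content_hash is emitted as the Content-MD5 request header. A minimal sketch, assuming the import path as it existed before this removal, an already-constructed async PathOperations instance `path_ops`, and that the request body parameter is named `body` as in the implementation shown above:

import hashlib

from azure.multiapi.storagev2.filedatalake.v2020_02_10._generated.models import PathHTTPHeaders

async def append_with_md5(path_ops, data: bytes, offset: int = 0):
    headers = PathHTTPHeaders(
        transactional_content_hash=bytearray(hashlib.md5(data).digest()),
    )
    await path_ops.append_data(
        body=data,
        position=offset,                # write offset within the file
        content_length=len(data),       # must equal the body length for "Append Data"
        path_http_headers=headers,      # surfaced as the Content-MD5 request header
    )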
- :type content_encoding: str - :param content_language: Optional. Set the blob's content language. If specified, this property - is stored with the blob and returned with a read request. - :type content_language: str - :param content_disposition: Optional. Sets the blob's Content-Disposition header. - :type content_disposition: str - :param content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :type content_type: str - :param content_md5: Specify the transactional md5 for the body, to be validated by the service. - :type content_md5: bytearray - :param transactional_content_hash: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, - } - - def __init__( - self, - *, - cache_control: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_disposition: Optional[str] = None, - content_type: Optional[str] = None, - content_md5: Optional[bytearray] = None, - transactional_content_hash: Optional[bytearray] = None, - **kwargs - ): - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.content_type = content_type - self.content_md5 = content_md5 - self.transactional_content_hash = transactional_content_hash - - -class PathList(msrest.serialization.Model): - """PathList. - - :param paths: - :type paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__( - self, - *, - paths: Optional[List["Path"]] = None, - **kwargs - ): - super(PathList, self).__init__(**kwargs) - self.paths = paths - - -class SetAccessControlRecursiveResponse(msrest.serialization.Model): - """SetAccessControlRecursiveResponse. 
- - :param directories_successful: - :type directories_successful: int - :param files_successful: - :type files_successful: int - :param failure_count: - :type failure_count: int - :param failed_entries: - :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__( - self, - *, - directories_successful: Optional[int] = None, - files_successful: Optional[int] = None, - failure_count: Optional[int] = None, - failed_entries: Optional[List["AclFailedEntry"]] = None, - **kwargs - ): - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - self.failed_entries = failed_entries - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - """ - - _attribute_map = { - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - } - - def __init__( - self, - *, - source_if_match: Optional[str] = None, - source_if_none_match: Optional[str] = None, - source_if_modified_since: Optional[datetime.datetime] = None, - source_if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param error: The service error response object. - :type error: ~azure.storage.filedatalake.models.StorageErrorAutoGenerated - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorAutoGenerated'}, - } - - def __init__( - self, - *, - error: Optional["StorageErrorAutoGenerated"] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.error = error - - -class StorageErrorAutoGenerated(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. 
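Both the **kwargs-based models and the keyword-only Python 3 variants above rely on msrest's _attribute_map to translate between Python attribute names and JSON wire keys. A self-contained miniature of the same mechanism (requires the msrest package; the comments assume msrest's default serializer behavior):

import msrest.serialization

class FileSystem(msrest.serialization.Model):
    # Wire keys ('lastModified', 'eTag') differ from the Python attribute names.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'last_modified': {'key': 'lastModified', 'type': 'str'},
        'e_tag': {'key': 'eTag', 'type': 'str'},
    }

    def __init__(self, *, name=None, last_modified=None, e_tag=None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.last_modified = last_modified
        self.e_tag = e_tag

fs = FileSystem(name="logs", e_tag='"0x8D8A"')
print(fs.serialize())  # wire-format dict keyed by 'name' / 'eTag'
roundtrip = FileSystem.deserialize({'name': 'logs', 'lastModified': 'Mon, 10 Feb 2020 00:00:00 GMT'})
print(roundtrip.last_modified)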
- :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - code: Optional[str] = None, - message: Optional[str] = None, - **kwargs - ): - super(StorageErrorAutoGenerated, self).__init__(**kwargs) - self.code = code - self.message = message diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/__init__.py deleted file mode 100644 index 0db71e0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_file_system_operations.py deleted file mode 100644 index 3726a2d..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_file_system_operations.py +++ /dev/null @@ -1,524 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class FileSystemOperations(object): - """FileSystemOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - properties=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the - operation fails. This operation does not support conditional HTTP requests. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) 
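# map_error() raises the azure.core exception registered in error_map for this status code
# (401 -> ClientAuthenticationError, 404 -> ResourceNotFoundError, 409 -> ResourceExistsError);
# any other non-201 status falls through to the generic HttpResponseError below, which carries
# the deserialized StorageError as its `model` attribute.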
- error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}'} # type: ignore - - def set_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - properties=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional HTTP requests. For - more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param modified_access_conditions: Parameter group. 
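set_properties (documented above and implemented just below) reads only if_modified_since / if_unmodified_since from the ModifiedAccessConditions group, and property values must be base64 encoded per the x-ms-properties format described above. A minimal sketch of a conditional call, assuming `fs_ops` is an already-constructed FileSystemOperations instance and the import path as it existed before this removal:

import base64
import datetime

from azure.multiapi.storagev2.filedatalake.v2020_02_10._generated.models import ModifiedAccessConditions

# Property values are base64-encoded strings in the "n1=v1, n2=v2" list.
props = "team=" + base64.b64encode(b"storage").decode("ascii")

# Only apply the change if the filesystem has not been modified since this timestamp.
conditions = ModifiedAccessConditions(
    if_unmodified_since=datetime.datetime(2020, 2, 10, tzinfo=datetime.timezone.utc),
)

fs_ops.set_properties(properties=props, modified_access_conditions=conditions)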
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - def get_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Get FileSystem Properties. 
- - All system and user-defined filesystem properties are specified in the response headers. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - def delete( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Delete FileSystem. - - Marks the FileSystem for deletion. 
When a FileSystem is deleted, a FileSystem with the same - identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, - attempts to create a filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information indicating that the - filesystem is being deleted. All other operations, including operations on any files or - directories within the filesystem, will fail with status code 404 (Not Found) while the - filesystem is being deleted. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}'} # type: ignore - - def list_paths( - self, - recursive, # type: bool - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - continuation=None, # type: Optional[str] - path=None, # type: Optional[str] - max_results=None, # type: Optional[int] - upn=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> Iterable["_models.PathList"] - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required. - :type recursive: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. The number of paths returned with each invocation is limited. If - the number of paths to be returned exceeds this limit, a continuation token is returned in the - response header x-ms-continuation. When a continuation token is returned in the response, it - must be specified in a subsequent invocation of the list operation to continue listing the - paths. - :type continuation: str - :param path: Optional. Filters results to paths within the specified directory. An error - occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names.
- :type upn: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either PathList or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.PathList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # TODO: change this once continuation/next_link autorest PR is merged - def prepare_request(next_link=None, cont_token=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_paths.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - # TODO: change this once continuation/next_link autorest PR is merged - if cont_token is not None: - query_parameters['continuation'] = self._serialize.query("continuation", cont_token, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - def extract_data(pipeline_response): - # TODO: change this once continuation/next_link autorest PR is merged - try: - cont_token = pipeline_response.http_response.headers['x-ms-continuation'] - except KeyError: - cont_token = None - deserialized = self._deserialize('PathList', pipeline_response) - list_of_elem = deserialized.paths - if cls: - list_of_elem = cls(list_of_elem) - # TODO: change this once continuation/next_link autorest PR is merged - return cont_token, iter(list_of_elem) - - # TODO: change this once continuation/next_link autorest PR is merged - def get_next(cont_token=None): - cont_token = cont_token if not continuation else continuation - request = prepare_request(cont_token=cont_token) - - pipeline_response = self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged( - get_next, extract_data - ) - list_paths.metadata = {'url': '/{filesystem}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_path_operations.py deleted file mode 100644 index bd52794..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_path_operations.py +++ /dev/null @@ -1,1712 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class PathOperations(object): - """PathOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - resource=None, # type: Optional[Union[str, "_models.PathResourceType"]] - continuation=None, # type: Optional[str] - mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - rename_source=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - properties=None, # type: Optional[str] - permissions=None, # type: Optional[str] - umask=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is overwritten and, if the - destination already exists and has a lease, the lease is broken. This operation supports - conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob - Service Operations `_. To fail if the destination already exists, - use a conditional request with If-None-Match: "*". - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param resource: Required only for Create File and Create Directory. The value must be "file" - or "directory". - :type resource: str or ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When renaming a directory, the number of paths that are renamed - with each invocation is limited. If the number of paths to be renamed exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the rename operation to - continue renaming the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This parameter determines the - behavior of the rename operation. The value must be "legacy" or "posix", and the default value - will be "posix". - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. The value must have the - following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties - will overwrite the existing properties; otherwise, the existing properties will be preserved. - This value must be a URL percent-encoded string. Note that the string may only contain ASCII - characters in the ISO-8859-1 character set. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param properties: Optional.
User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, the umask - restricts the permissions of the file or directory to be created. The resulting permission is - given by p bitwise and not u, where p is the permission and u is the umask. For example, if p - is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 - for a directory and 0666 for a file. The default umask is 0027. The umask must be specified - in 4-digit octal notation (e.g. 0766). - :type umask: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
- :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_encoding = None - _content_language = None - _content_disposition = None - _content_type = None - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - _content_disposition = path_http_headers.content_disposition - _content_type = path_http_headers.content_type - if source_modified_access_conditions is not None: - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if resource is not None: - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = 
self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - 
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def update( - self, - action, # type: Union[str, "_models.PathUpdateAction"] - mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - body, # type: IO - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - max_records=None, # type: Optional[int] - continuation=None, # type: Optional[str] - force_flag=None, # type: Optional[bool] - position=None, # type: Optional[int] - retain_uncommitted_data=None, # type: Optional[bool] - close=None, # type: Optional[bool] - content_length=None, # type: Optional[int] - properties=None, # type: Optional[str] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.SetAccessControlRecursiveResponse"] - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, - sets properties for a file or directory, or sets access control for a file or directory. Data - can only be appended to a file. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param action: The action must be "append" to upload data to be appended to a file, "flush" to - flush previously uploaded data to a file, "setProperties" to set the properties of a file or - directory, "setAccessControl" to set the owner, group, permissions, or access control list for - a file or directory, or "setAccessControlRecursive" to set the access control list for a - directory recursively. Note that Hierarchical Namespace must be enabled for the account in - order to use access control. Also note that the Access Control List (ACL) includes permissions - for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers - are mutually exclusive. - :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param body: Initial data. - :type body: IO - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the - maximum number of files or directories on which the acl change will be applied. 
If omitted or - greater than 2,000, the request will process up to 2,000 items. - :type max_records: int - :param continuation: Optional. The number of paths processed with each invocation is limited. - If the number of paths to be processed exceeds this limit, a continuation token is returned in - the response header x-ms-continuation. When a continuation token is returned in the response, - it must be percent-encoded and specified in a subsequent invocation of the - setAccessControlRecursive operation. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set, the default value is false. - :type force_flag: bool - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false", a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed. - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string.
Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/json" - - # Construct URL - url = self.update.metadata['url'] # type: ignore - path_format_arguments = { - 'url': 
self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", 
_if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - deserialized = None - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if response.status_code == 202: - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def lease( - self, - x_ms_lease_action, # type: Union[str, "_models.PathLeaseAction"] 
- request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - x_ms_lease_duration=None, # type: Optional[int] - x_ms_lease_break_period=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Lease Path. - - Create and manage a lease to restrict write and delete access to the path. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations `_. - - :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", - and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" - to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the - lease break period is allowed to elapse, during which time no lease operation except break and - release can be performed on the file. When a lease is successfully broken, the response - indicates the interval in seconds until a new lease can be acquired. Use "change" and specify - the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to - change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. - :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies - the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or - -1 for infinite lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, - and specifies the break period of the lease in seconds. The lease break duration must be - between 0 and 60 seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 201: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 202: - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def read( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - x_ms_range_get_content_md5=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """Read File. - - Read the contents of a file. For read operations, range requests are supported. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: The HTTP Range request header specifies one or more byte ranges of the resource - to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified - together with the Range header, the service returns the MD5 hash for the range, as long as the - range is less than or equal to 4MB in size. 
If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this header is set to true when - the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.read.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - action=None, # type: Optional[Union[str, "_models.PathGetPropertiesAction"]] - upn=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a path. Get Status returns - all system defined properties for a path. Get Access Control List returns the access control - list for a path. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param action: Optional. If the value is "getStatus" only the system defined properties for the - path are returned. If the value is "getAccessControl" the access control list is returned in - the response headers (Hierarchical Namespace must be enabled for the account), otherwise the - properties are returned. - :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'str') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - 
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def delete( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - recursive=None, # type: Optional[bool] - continuation=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param recursive: Required. - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. 
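# --- Editor's illustrative example (not part of the deleted diff) ---
# A hedged sketch of the delete operation above via the public clients from
# azure-storage-file-datalake. The recursive flag maps to the 'recursive' query parameter,
# and large directory deletes can return an x-ms-continuation token when the per-call path
# limit is exceeded. Names are hypothetical placeholders.
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<account-key-or-sas>",
)
fs = service.get_file_system_client("my-filesystem")

fs.get_file_client("folder/data.csv").delete_file()      # delete a single file
fs.get_directory_client("folder").delete_directory()     # delete a directory
# ---------------------------------------------------------------------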
If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
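# --- Editor's illustrative example (not part of the deleted diff) ---
# A minimal sketch of setting POSIX permissions or an ACL on a path, assuming the public
# DataLakeDirectoryClient / DataLakeFileClient set_access_control API, which corresponds to
# the PATCH ?action=setAccessControl request built in this operation. Identifiers are
# hypothetical placeholders.
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<aad-token-credential-or-key>",
)
dir_client = service.get_file_system_client("my-filesystem").get_directory_client("folder")

# Either symbolic/octal POSIX permissions ...
dir_client.set_access_control(permissions="rwxr-x---")
# ... or an explicit ACL (scope:type:id:permissions entries), but not both in one call.
dir_client.set_access_control(acl="user::rwx,group::r-x,other::---")
# ---------------------------------------------------------------------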
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/json" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control_recursive( - self, - mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - timeout=None, # type: Optional[int] - continuation=None, # type: Optional[str] - force_flag=None, # type: Optional[bool] - max_records=None, # type: Optional[int] - acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.SetAccessControlRecursiveResponse" - """Set the access control list for a path and subpaths. - - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files or directories on which - the acl change will be applied. If omitted or greater than 2,000, the request will process up - to 2,000 items. - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". 
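# --- Editor's illustrative example (not part of the deleted diff) ---
# A hedged sketch of the recursive ACL operation above through the public
# DataLakeDirectoryClient.set_access_control_recursive API. The service applies the ACL in
# batches and may return a continuation token; the counter attribute names below reflect the
# public AccessControlChangeResult model as best understood, and the names used are placeholders.
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<aad-token-credential>",
)
dir_client = service.get_file_system_client("my-filesystem").get_directory_client("folder")

result = dir_client.set_access_control_recursive(acl="user::rwx,group::r-x,other::---")
print(result.counters.directories_successful,
      result.counters.files_successful,
      result.counters.failure_count)
# ---------------------------------------------------------------------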
- :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - action = "setAccessControlRecursive" - accept = "application/json" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return 
deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def flush_data( - self, - timeout=None, # type: Optional[int] - position=None, # type: Optional[int] - retain_uncommitted_data=None, # type: Optional[bool] - close=None, # type: Optional[bool] - content_length=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Flush previously uploaded data to the file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed. - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group.
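# --- Editor's illustrative example (not part of the deleted diff) ---
# Append/flush protocol sketch: data is staged with append_data at an explicit offset and only
# becomes part of the file once flush_data is called with the final length. Shown via the public
# DataLakeFileClient, assuming azure-storage-file-datalake; names are placeholders.
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<account-key-or-sas>",
)
file_client = service.get_file_system_client("my-filesystem").get_file_client("folder/log.txt")

file_client.create_file()
chunk1, chunk2 = b"hello ", b"world"
file_client.append_data(chunk1, offset=0, length=len(chunk1))
file_client.append_data(chunk2, offset=len(chunk1), length=len(chunk2))
# Flush commits the contiguous uploaded data; the offset must equal the final file length.
file_client.flush_data(len(chunk1) + len(chunk2))
# ---------------------------------------------------------------------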
- :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - action = "flush" - accept = "application/json" - - # Construct URL - url = self.flush_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is 
not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def append_data( - self, - body, # type: IO - position=None, # type: Optional[int] - timeout=None, # type: Optional[int] - content_length=None, # type: Optional[int] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Append data to the file. - - :param body: Initial data. - :type body: IO - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. 
The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _transactional_content_hash = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _transactional_content_hash = path_http_headers.transactional_content_hash - action = "append" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.append_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", 
_lease_id, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_expiry( - self, - expiry_options, # type: Union[str, "_models.PathExpiryOptions"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - expires_on=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
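# --- Editor's illustrative example (not part of the deleted diff) ---
# A hedged sketch of setting a file expiry time through the public
# DataLakeFileClient.set_file_expiry API, which corresponds to the PUT ?comp=expiry request
# built in this operation. The option strings come from the PathExpiryOptions enum; account,
# filesystem and path names are placeholders.
import datetime
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<account-key-or-sas>",
)
file_client = service.get_file_system_client("my-filesystem").get_file_client("folder/tmp.dat")

# Expire at an absolute UTC time ...
file_client.set_file_expiry("Absolute", expires_on=datetime.datetime(2030, 1, 1, tzinfo=datetime.timezone.utc))
# ... or never expire.
file_client.set_file_expiry("NeverExpire")
# ---------------------------------------------------------------------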
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/json" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_service_operations.py deleted file mode 100644 index c5a9555..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/operations/_service_operations.py +++ /dev/null @@ -1,153 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def list_file_systems( - self, - prefix=None, # type: Optional[str] - continuation=None, # type: Optional[str] - max_results=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Iterable["_models.FileSystemList"] - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
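# --- Editor's illustrative example (not part of the deleted diff) ---
# A minimal sketch of paging through file systems with the public
# DataLakeServiceClient.list_file_systems, which wraps the ItemPaged machinery of the removed
# generated ServiceOperations.list_file_systems. Placeholder names and credentials only.
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<account-key-or-sas>",
)

# Continuation tokens are handled by the ItemPaged iterator; callers simply iterate.
for fs in service.list_file_systems(name_starts_with="logs-"):
    print(fs.name, fs.last_modified)
# ---------------------------------------------------------------------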
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either FileSystemList or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.FileSystemList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - resource = "account" - accept = "application/json" - - def prepare_request(next_link=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_file_systems.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - def extract_data(pipeline_response): - deserialized = self._deserialize('FileSystemList', pipeline_response) - list_of_elem = deserialized.filesystems - if cls: - list_of_elem = cls(list_of_elem) - return None, iter(list_of_elem) - - def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged( - get_next, extract_data - ) - list_file_systems.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/version.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/version.py deleted file mode 100644 index 6ef707d..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/version.py 
+++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -VERSION = "2020-02-10" - diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_list_paths_helper.py deleted file mode 100644 index ea7b73b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_list_paths_helper.py +++ /dev/null @@ -1,72 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._models import PathProperties -from ._generated.models import Path -from ._shared.response_handlers import process_storage_error - - -class PathPropertiesPaged(PageIterator): - """An Iterable of Path properties. - - :ivar str path: Filters the results to return only paths under the specified path. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results. - - :param callable command: Function to retrieve the next page of items. - :param str path: Filters the results to return only paths under the specified path. - :param int max_results: The maximum number of paths to retrieve per - call. - :param str continuation_token: An opaque continuation token.
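# --- Editor's illustrative example (not part of the deleted diff) ---
# PathPropertiesPaged backs FileSystemClient.get_paths; a hedged sketch of how it is consumed,
# assuming the public azure-storage-file-datalake client. Names are hypothetical placeholders.
from azure.storage.filedatalake import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<account-key-or-sas>",
)
fs = service.get_file_system_client("my-filesystem")

# Each page of results is converted into PathProperties objects by _build_item.
for path in fs.get_paths(path="folder", recursive=True):
    kind = "dir " if path.is_directory else "file"
    print(kind, path.name, path.content_length)
# ---------------------------------------------------------------------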
- """ - def __init__( - self, command, - recursive, - path=None, - max_results=None, - continuation_token=None, - upn=None): - super(PathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.recursive = recursive - self.results_per_page = max_results - self.path = path - self.upn = upn - self.current_page = None - self.path_list = None - - def _get_next_cb(self, continuation_token): - try: - return self._command( - self.recursive, - continuation=continuation_token or None, - path=self.path, - max_results=self.results_per_page, - upn=self.upn) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - get_next_return = list(get_next_return) - self.path_list = get_next_return - self.current_page = [self._build_item(item) for item in self.path_list] - - return None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, PathProperties): - return item - if isinstance(item, Path): - path = PathProperties._from_generated(item) # pylint: disable=protected-access - return path - return item diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_models.py deleted file mode 100644 index 6880951..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_models.py +++ /dev/null @@ -1,829 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from datetime import datetime -from enum import Enum - -from azure.multiapi.storagev2.blob.v2020_02_10 import LeaseProperties as BlobLeaseProperties -from azure.multiapi.storagev2.blob.v2020_02_10 import AccountSasPermissions as BlobAccountSasPermissions -from azure.multiapi.storagev2.blob.v2020_02_10 import ResourceTypes as BlobResourceTypes -from azure.multiapi.storagev2.blob.v2020_02_10 import UserDelegationKey as BlobUserDelegationKey -from azure.multiapi.storagev2.blob.v2020_02_10 import ContentSettings as BlobContentSettings -from azure.multiapi.storagev2.blob.v2020_02_10 import AccessPolicy as BlobAccessPolicy -from azure.multiapi.storagev2.blob.v2020_02_10 import DelimitedTextDialect as BlobDelimitedTextDialect -from azure.multiapi.storagev2.blob.v2020_02_10 import DelimitedJsonDialect as BlobDelimitedJSON -from azure.multiapi.storagev2.blob.v2020_02_10 import ArrowDialect as BlobArrowDialect -from azure.multiapi.storagev2.blob.v2020_02_10._models import ContainerPropertiesPaged -from ._shared.models import DictMixin - - -class FileSystemProperties(object): - """File System properties class. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file system was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file system. - :ivar str public_access: Specifies whether data in the file system may be accessed - publicly and the level of access. 
- :ivar bool has_immutability_policy: - Represents whether the file system has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the file system has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - file system as metadata. - :ivar bool deleted: - Whether this file system was deleted. - :ivar str deleted_version: - The version of a deleted file system. - - ``FileSystemProperties`` instances expose these values as attributes, for - example ``file_system_props.last_modified``; the file system name is available - as ``file_system_props.name``. - """ - - def __init__(self): - self.name = None - self.last_modified = None - self.etag = None - self.lease = None - self.public_access = None - self.has_immutability_policy = None - self.has_legal_hold = None - self.metadata = None - self.deleted = None - self.deleted_version = None - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.deleted = generated.deleted - props.deleted_version = generated.version - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - generated.properties.public_access) - props.has_immutability_policy = generated.properties.has_immutability_policy - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - return props - - @classmethod - def _convert_from_container_props(cls, container_properties): - container_properties.__class__ = cls - container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - container_properties.public_access) - container_properties.lease.__class__ = LeaseProperties - return container_properties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access - - -class DirectoryProperties(DictMixin): - """ - :ivar str name: name of the directory - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally.
- :ivar bool deleted: if the current directory marked as deleted - :ivar dict metadata: Name-value pairs associated with the directory as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the directory was created, in UTC. - :ivar int remaining_retention_days: The number of days that the directory will be retained - before being permanently deleted by the service. - :var ~azure.storage.filedatalake.ContentSettings content_settings: - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.etag = kwargs.get('ETag') - self.deleted = False - self.metadata = kwargs.get('metadata') - self.lease = LeaseProperties(**kwargs) - self.last_modified = kwargs.get('Last-Modified') - self.creation_time = kwargs.get('x-ms-creation-time') - self.deleted_time = None - self.remaining_retention_days = None - - -class FileProperties(DictMixin): - """ - :ivar str name: name of the file - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool deleted: if the current file marked as deleted - :ivar dict metadata: Name-value pairs associated with the file as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the file was created, in UTC. - :ivar int size: size of the file - :ivar int remaining_retention_days: The number of days that the file will be retained - before being permanently deleted by the service. - :var ~azure.storage.filedatalake.ContentSettings content_settings: - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.etag = kwargs.get('ETag') - self.deleted = False - self.metadata = kwargs.get('metadata') - self.lease = LeaseProperties(**kwargs) - self.last_modified = kwargs.get('Last-Modified') - self.creation_time = kwargs.get('x-ms-creation-time') - self.size = kwargs.get('Content-Length') - self.deleted_time = None - self.expiry_time = kwargs.get("x-ms-expiry-time") - self.remaining_retention_days = None - self.content_settings = ContentSettings(**kwargs) - - -class PathProperties(object): - """Path properties listed by get_paths api. - - :ivar str name: the full path for a file or directory. - :ivar str owner: The owner of the file or directory. - :ivar str group: he owning group of the file or directory. - :ivar str permissions: Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified. - :ivar bool is_directory: is the path a directory or not. - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar content_length: the size of file if the path is a file. 
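As an aside, a minimal sketch of how such path listings are usually consumed. The client construction, account URL, and path values below are illustrative assumptions rather than anything recorded in this diff; only the ``PathProperties`` attributes mirror the fields documented above::

    from azure.storage.filedatalake import FileSystemClient

    # Hypothetical account/file-system values, for illustration only.
    fs_client = FileSystemClient(
        account_url="https://myaccount.dfs.core.windows.net",
        file_system_name="my-filesystem",
        credential="<account-key>")

    # get_paths() pages through PathProperties items; each exposes the
    # attributes listed in the docstring above.
    for path in fs_client.get_paths(path="raw/2021", recursive=True):
        kind = "dir" if path.is_directory else "file"
        print(kind, path.name, path.content_length, path.last_modified)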
- """ - - def __init__(self, **kwargs): - super(PathProperties, self).__init__( - **kwargs - ) - self.name = kwargs.pop('name', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - self.last_modified = kwargs.get('last_modified', None) - self.is_directory = kwargs.get('is_directory', False) - self.etag = kwargs.get('etag', None) - self.content_length = kwargs.get('content_length', None) - - @classmethod - def _from_generated(cls, generated): - path_prop = PathProperties() - path_prop.name = generated.name - path_prop.owner = generated.owner - path_prop.group = generated.group - path_prop.permissions = generated.permissions - path_prop.last_modified = datetime.strptime(generated.last_modified, "%a, %d %b %Y %H:%M:%S %Z") - path_prop.is_directory = bool(generated.is_directory) - path_prop.etag = generated.additional_properties.get('etag') - path_prop.content_length = generated.content_length - return path_prop - - -class LeaseProperties(BlobLeaseProperties): - """DataLake Lease Properties. - - :ivar str status: - The lease status of the file. Possible values: locked|unlocked - :ivar str state: - Lease state of the file. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file is leased, specifies whether the lease is of infinite or fixed duration. - """ - - -class ContentSettings(BlobContentSettings): - """The content settings of a file or directory. - - :ivar str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :ivar str content_language: - If the content_language has previously been set - for the file, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :ivar str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :ivar bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - :keyword str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :keyword str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :keyword str content_language: - If the content_language has previously been set - for the file, that value is stored. - :keyword str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :keyword str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :keyword bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. 
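For reference, the ``last_modified`` strings parsed by ``PathProperties._from_generated`` above use an RFC 1123 style timestamp; a self-contained sketch of that conversion (the sample value is assumed)::

    from datetime import datetime

    # Same format string as _from_generated above; the result is a naive datetime.
    raw = "Mon, 07 Jun 2021 18:23:05 GMT"
    last_modified = datetime.strptime(raw, "%a, %d %b %Y %H:%M:%S %Z")
    print(last_modified.isoformat())  # 2021-06-07T18:23:05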
- """ - - def __init__( - self, **kwargs): - super(ContentSettings, self).__init__( - **kwargs - ) - - -class AccountSasPermissions(BlobAccountSasPermissions): - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - create=False): - super(AccountSasPermissions, self).__init__( - read=read, create=create, write=write, list=list, - delete=delete - ) - - -class FileSystemSasPermissions(object): - """FileSystemSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_system_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool write: - Create or write content, properties, metadata. Lease the file system. - :param bool delete: - Delete the file system. - :param bool list: - List paths in the file system. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - **kwargs): - self.read = read - self.write = write - self.delete = delete - self.list = list - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSystemSasPermissions from a string. - - To specify read, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
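The boolean flags above serialize to the compact permission string that ``from_string`` parses back; a small round-trip sketch (the import assumes an installed filedatalake package and is not part of this diff)::

    from azure.storage.filedatalake import FileSystemSasPermissions

    perms = FileSystemSasPermissions(read=True, write=True, list=True)
    print(str(perms))  # "rwl", letter order follows the _str concatenation above

    parsed = FileSystemSasPermissions.from_string("rwdl")
    print(parsed.read, parsed.write, parsed.delete, parsed.list)  # True True True True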
- :return: A FileSystemSasPermissions object - :rtype: ~azure.storage.fildatalake.FileSystemSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, write=p_write, delete=p_delete, - list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class DirectorySasPermissions(object): - """DirectorySasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_directory_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool create: - Create a new directory - :param bool write: - Create or write content, properties, metadata. Lease the directory. - :param bool delete: - Delete the directory. - :keyword bool list: - List any files in the directory. Implies Execute. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, create=False, write=False, - delete=False, **kwargs): - self.read = read - self.create = create - self.write = write - self.delete = delete - self.list = kwargs.pop('list', None) - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a DirectorySasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A DirectorySasPermissions object - :rtype: ~azure.storage.filedatalake.DirectorySasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, - list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_sas` function. - - :param bool read: - Read the content, properties, metadata etc. Use the file as - the source of a read operation. - :param bool create: - Write a new file - :param bool write: - Create or write content, properties, metadata. Lease the file. - :param bool delete: - Delete the file. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, create=False, write=False, delete=False, **kwargs): - self.read = read - self.create = create - self.write = write - self.delete = delete - self.list = list - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A FileSasPermissions object - :rtype: ~azure.storage.fildatalake.FileSasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, - move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class AccessPolicy(BlobAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.datalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - """ - - def __init__(self, permission=None, expiry=None, **kwargs): - super(AccessPolicy, self).__init__( - permission=permission, expiry=expiry, start=kwargs.pop('start', None) - ) - - -class ResourceTypes(BlobResourceTypes): - """ - Specifies the resource types that are accessible with the account SAS. 
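As a hedged illustration of the stored-access-policy mechanism described above, the policy object might be built like this (times and permissions are placeholders; registering the policy on the file system is out of scope here)::

    from datetime import datetime, timedelta
    from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions

    policy = AccessPolicy(
        permission=FileSystemSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
        start=datetime.utcnow())
    # A SAS that references this policy by id can then omit these fields from
    # its URL, so the policy can later revoke or adjust the signature.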
- - :param bool service: - Access to service-level APIs (e.g.List File Systems) - :param bool file_system: - Access to file_system-level APIs (e.g., Create/Delete file system, - List Directories/Files) - :param bool object: - Access to object-level APIs for - files(e.g. Create File, etc.) - """ - - def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin - ): - super(ResourceTypes, self).__init__(service=service, container=file_system, object=object) - - -class UserDelegationKey(BlobUserDelegationKey): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - - @classmethod - def _from_generated(cls, generated): - delegation_key = cls() - delegation_key.signed_oid = generated.signed_oid - delegation_key.signed_tid = generated.signed_tid - delegation_key.signed_start = generated.signed_start - delegation_key.signed_expiry = generated.signed_expiry - delegation_key.signed_service = generated.signed_service - delegation_key.signed_version = generated.signed_version - delegation_key.value = generated.value - return delegation_key - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the file system may be accessed publicly and the level of access. - """ - - File = 'blob' - """ - Specifies public read access for files. file data within this file system can be read - via anonymous request, but file system data is not available. Clients cannot enumerate - files within the container via anonymous request. - """ - - FileSystem = 'container' - """ - Specifies full public read access for file system and file data. Clients can enumerate - files within the file system via anonymous request, but cannot enumerate file systems - within the storage account. - """ - - @classmethod - def _from_generated(cls, public_access): - if public_access == "blob": # pylint:disable=no-else-return - return cls.File - elif public_access == "container": - return cls.FileSystem - - return None - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class DelimitedJsonDialect(BlobDelimitedJSON): - """Defines the input or output JSON serialization for a datalake query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - -class DelimitedTextDialect(BlobDelimitedTextDialect): - """Defines the input or output delimited (CSV) serialization for a datalake query request. - - :keyword str delimiter: - Column separator, defaults to ','. 
- :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - - -class ArrowDialect(BlobArrowDialect): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param str type: Required. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. - """ - - -class ArrowType(str, Enum): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class DataLakeFileQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position - - -class AccessControlChangeCounters(DictMixin): - """ - AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively. - - :ivar int directories_successful: - Number of directories where Access Control List has been updated successfully. - :ivar int files_successful: - Number of files where Access Control List has been updated successfully. - :ivar int failure_count: - Number of paths where Access Control List update has failed. - """ - - def __init__(self, directories_successful, files_successful, failure_count): - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - - -class AccessControlChangeResult(DictMixin): - """ - AccessControlChangeResult contains result of operations that change Access Control Lists recursively. - - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters: - Contains counts of paths changed from start of the operation. - :ivar str continuation: - Optional continuation token. - Value is present when operation is split into multiple batches and can be used to resume progress. - """ - - def __init__(self, counters, continuation): - self.counters = counters - self.continuation = continuation - - -class AccessControlChangeFailure(DictMixin): - """ - Represents an entry that failed to update Access Control List. - - :ivar str name: - Name of the entry. - :ivar bool is_directory: - Indicates whether the entry is a directory. - :ivar str error_message: - Indicates the reason why the entry failed to update. 
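The ``PublicAccess`` enum defined above simply re-labels the underlying blob-service values, so it compares equal to the raw strings; a quick check (import path assumed)::

    from azure.storage.filedatalake import PublicAccess

    print(PublicAccess.File.value)        # "blob"
    print(PublicAccess.FileSystem.value)  # "container"
    assert PublicAccess.File == "blob"    # str-derived enum, per the class above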
- """ - - def __init__(self, name, is_directory, error_message): - self.name = name - self.is_directory = is_directory - self.error_message = error_message - - -class AccessControlChanges(DictMixin): - """ - AccessControlChanges contains batch and cumulative counts of operations - that change Access Control Lists recursively. - Additionally it exposes path entries that failed to update while these operations progress. - - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters: - Contains counts of paths changed within single batch. - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters: - Contains counts of paths changed from start of the operation. - :ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures: - List of path entries that failed to update Access Control List within single batch. - :ivar str continuation: - An opaque continuation token that may be used to resume the operations in case of failures. - """ - - def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation): - self.batch_counters = batch_counters - self.aggregate_counters = aggregate_counters - self.batch_failures = batch_failures - self.continuation = continuation diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py deleted file mode 100644 index 0518141..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py +++ /dev/null @@ -1,894 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Any, Dict - -try: - from urllib.parse import urlparse, quote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.exceptions import AzureError, HttpResponseError -from azure.multiapi.storagev2.blob.v2020_02_10 import BlobClient -from ._data_lake_lease import DataLakeLeaseClient -from ._deserialize import process_storage_error -from ._generated import AzureDataLakeStorageRESTAPI -from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \ - AccessControlChangeCounters, AccessControlChangeFailure -from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \ - get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions -from ._shared.base_client import StorageAccountHostsMixin, parse_query -from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(StorageAccountHostsMixin): - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - # remove the preceding/trailing delimiter from the path components - file_system_name = file_system_name.strip('/') - - # the name of root directory is / - if path_name != '/': - path_name = path_name.strip('/') - - if not (file_system_name and path_name): - raise ValueError("Please specify a file system name and file path.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._blob_client = BlobClient(blob_account_url, file_system_name, path_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self.path_name = path_name - - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI( - self._blob_client.url, - file_system=file_system_name, - path=path_name, - pipeline=self._pipeline) - - def __exit__(self, *args): - self._blob_client.close() - super(PathClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. 
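Because the path client wraps a parallel ``BlobClient`` (see ``__init__`` above), ``close()`` tears down both transports; using one of the derived clients as a context manager is the usual way to get that for free. A sketch with hypothetical account details::

    from azure.storage.filedatalake import DataLakeDirectoryClient

    with DataLakeDirectoryClient(
            account_url="https://myaccount.dfs.core.windows.net",
            file_system_name="my-filesystem",
            directory_name="raw/2021",
            credential="<account-key>") as dir_client:
        props = dir_client.get_directory_properties()
        print(props.last_modified)
    # __exit__ above closes the wrapped blob client along with this one.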
- """ - self._blob_client.close() - self.__exit__() - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - quote(self.path_name, safe='~'), - self._query_str) - - def _create_path_options(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'resource': resource_type, - 'properties': add_metadata_headers(metadata), - 'permissions': kwargs.pop('permissions', None), - 'umask': kwargs.pop('umask', None), - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _delete_path_options(**kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - def _delete(self, **kwargs): - # type: (bool, **Any) -> None - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :param ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :param int timeout: - The timeout parameter is expressed in seconds. 
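The umask arithmetic that ``_create`` documents above (``p & ^u``) is plain bitwise masking; a one-line check of the documented example::

    p, u = 0o777, 0o057
    print(oct(p & ~u))  # 0o720, matching the example in _create's docstring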
- :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return self._client.path.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'owner': owner, - 'group': group, - 'permissions': permissions, - 'acl': acl, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). 
- """ - if not any([owner, group, permissions, acl]): - raise ValueError("At least one parameter should be set for set_access_control API") - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return self._client.path.set_access_control(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _get_access_control_options(upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'action': 'getAccessControl', - 'upn': upn if upn else False, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - :param upn: Optional. - Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. 
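Typical use of the two methods above, with the ACL in the documented ``[scope:][type]:[id]:[permissions]`` form. The ``file_client`` here is a hypothetical ``DataLakeFileClient`` whose construction is omitted, and the key names assume the usual normalized response headers::

    # Replace the ACL, then read it back with object IDs resolved to UPNs.
    file_client.set_access_control(acl="user::rwx,group::r-x,other::r--")
    acl_props = file_client.get_access_control(upn=True)
    print(acl_props["owner"], acl_props["permissions"], acl_props["acl"])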
- """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return self._client.path.get_properties(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_recursive_options(mode, acl, **kwargs): - # type: (str, str, **Any) -> Dict[str, Any] - - options = { - 'mode': mode, - 'force_flag': kwargs.pop('continue_on_failure', None), - 'timeout': kwargs.pop('timeout', None), - 'continuation': kwargs.pop('continuation_token', None), - 'max_records': kwargs.pop('batch_size', None), - 'acl': acl, - 'cls': return_headers_and_deserialized} - options.update(kwargs) - return options - - def set_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Sets the Access Control on a path and sub-paths. - - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def update_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Modifies the Access Control on a path and sub-paths. 
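A sketch of driving ``set_access_control_recursive`` with the progress hook and batch controls documented above; ``directory_client`` is a hypothetical ``DataLakeDirectoryClient``::

    failed_entries = []

    def report(changes):
        # 'changes' is an AccessControlChanges instance (see _models.py above).
        failed_entries.extend(changes.batch_failures)
        totals = changes.aggregate_counters
        print(totals.directories_successful, totals.files_successful, totals.failure_count)

    result = directory_client.set_access_control_recursive(
        acl="user::rwx,group::r-x,other::---",
        progress_hook=report,
        batch_size=2000)
    print(result.counters.failure_count, result.continuation)

If the call raises an ``AzureError``, its ``continuation_token`` attribute (populated in ``_set_access_control_internal`` below) can be passed back as ``continuation_token=`` to resume where the operation stopped.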
- - :param acl: - Modifies POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def remove_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Removes the Access Control on a path and sub-paths. - - :param acl: - Removes POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, and a user or - group identifier in the format "[scope:][type]:[id]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. 
- The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed then, - continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def _set_access_control_internal(self, options, progress_hook, max_batches=None): - try: - continue_on_failure = options.get('force_flag') - total_directories_successful = 0 - total_files_success = 0 - total_failure_count = 0 - batch_count = 0 - last_continuation_token = None - current_continuation_token = None - continue_operation = True - while continue_operation: - headers, resp = self._client.path.set_access_control_recursive(**options) - - # make a running tally so that we can report the final results - total_directories_successful += resp.directories_successful - total_files_success += resp.files_successful - total_failure_count += resp.failure_count - batch_count += 1 - current_continuation_token = headers['continuation'] - - if current_continuation_token is not None: - last_continuation_token = current_continuation_token - - if progress_hook is not None: - progress_hook(AccessControlChanges( - batch_counters=AccessControlChangeCounters( - directories_successful=resp.directories_successful, - files_successful=resp.files_successful, - failure_count=resp.failure_count, - ), - aggregate_counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count, - ), - batch_failures=[AccessControlChangeFailure( - name=failure.name, - is_directory=failure.type == 'DIRECTORY', - error_message=failure.error_message) for failure in resp.failed_entries], - continuation=last_continuation_token)) - - # update the continuation token, if there are more operations that cannot be completed in a single call - max_batches_satisfied = (max_batches is not None and batch_count == max_batches) - continue_operation = bool(current_continuation_token) and not max_batches_satisfied - options['continuation'] = current_continuation_token - - # currently the service stops on 
any failure, so we should send back the last continuation token - # for the user to retry the failed updates - # otherwise we should just return what the service gave us - return AccessControlChangeResult(counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count), - continuation=last_continuation_token - if total_failure_count > 0 and not continue_on_failure else current_continuation_token) - except HttpResponseError as error: - error.continuation_token = last_continuation_token - process_storage_error(error) - except AzureError as error: - error.continuation_token = last_continuation_token - raise error - - def _rename_path_options(self, rename_source, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None): - raise ValueError("metadata, permissions, umask is not supported for this operation") - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - source_lease_id = get_lease_id(kwargs.pop('source_lease', None)) - mod_conditions = get_mod_conditions(kwargs) - source_mod_conditions = get_source_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'rename_source': rename_source, - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'source_lease_id': source_lease_id, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'mode': 'legacy', - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _rename_path(self, rename_source, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: - The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../tests/test_blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a file/directory. - """ - path_properties = self._blob_client.get_blob_properties(**kwargs) - return path_properties - - def _exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a path exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._blob_client.exists(**kwargs) - - def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - """ - return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. 
- :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_quick_query_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_quick_query_helper.py deleted file mode 100644 index ff67d27..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_quick_query_helper.py +++ /dev/null @@ -1,71 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import Union, Iterable, IO # pylint: disable=unused-import - - -class DataLakeFileQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. - """ - - def __init__( - self, - blob_query_reader - ): - self.name = blob_query_reader.name - self.file_system = blob_query_reader.container - self.response_headers = blob_query_reader.response_headers - self.record_delimiter = blob_query_reader.record_delimiter - self._bytes_processed = 0 - self._blob_query_reader = blob_query_reader - - def __len__(self): - return len(self._blob_query_reader) - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - return self._blob_query_reader.readall() - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - self._blob_query_reader(stream) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - return self._blob_query_reader.records() diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_serialize.py deleted file mode 100644 index 0287150..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_serialize.py +++ /dev/null @@ -1,89 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from azure.multiapi.storagev2.blob.v2020_02_10._serialize import _get_match_headers # pylint: disable=protected-access -from ._shared import encode_base64 -from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \ - SourceModifiedAccessConditions, LeaseAccessConditions - - -def convert_dfs_url_to_blob_url(dfs_account_url): - return dfs_account_url.replace('.dfs.', '.blob.', 1) - - -def convert_datetime_to_rfc1123(date): - weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()] - month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", - "Oct", "Nov", "Dec"][date.month - 1] - return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, date.day, month, - date.year, date.hour, date.minute, date.second) - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> str - headers = list() - if metadata: - for key, value in metadata.items(): - headers.append(key + '=') - headers.append(encode_base64(value)) - headers.append(',') - - if headers: - del headers[-1] - - return ''.join(headers) - - -def get_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None) - ) - - -def get_source_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_path_http_headers(content_settings): - path_headers = PathHTTPHeaders( - cache_control=content_settings.cache_control, - content_type=content_settings.content_type, - content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - content_encoding=content_settings.content_encoding, - content_language=content_settings.content_language, - content_disposition=content_settings.content_disposition - ) - return path_headers - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_lease_id(lease): - if not lease: - return "" - try: - lease_id = lease.id - except AttributeError: - lease_id = lease - return lease_id diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
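The _serialize.py helpers removed above format RFC-1123 dates and fold user metadata into a single comma-separated header of base64-encoded values. A small self-contained illustration of both behaviors, with throwaway sample values:

# Standalone illustration of the two serialization helpers shown above.
import base64
from datetime import datetime

def to_rfc1123(date):
    weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()]
    month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
             "Oct", "Nov", "Dec"][date.month - 1]
    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
        weekday, date.day, month, date.year, date.hour, date.minute, date.second)

def metadata_headers(metadata):
    # Each value is base64-encoded; pairs are joined as "key1=dmFsdWU=,key2=...".
    return ','.join(
        '{}={}'.format(k, base64.b64encode(v.encode('utf-8')).decode('utf-8'))
        for k, v in (metadata or {}).items())

print(to_rfc1123(datetime(2021, 6, 8, 12, 0, 0)))  # Tue, 08 Jun 2021 12:00:00 GMT
print(metadata_headers({'category': 'test'}))      # category=dGVzdA==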
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py deleted file mode 100644 index 5e524b2..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py +++ /dev/null @@ -1,459 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client_async.py deleted file mode 100644 index 091c350..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client_async.py +++ /dev/null @@ -1,183 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. 
- It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/constants.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/constants.py deleted file mode 100644 index a50e8b5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -from .._generated import AzureDataLakeStorageRESTAPI - - -X_MS_VERSION = AzureDataLakeStorageRESTAPI(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. 
- if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. 
- wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. 
- :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. 
This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. 
- :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. 
- padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/models.py deleted file mode 100644 index 0aeb96a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/models.py +++ /dev/null @@ -1,468 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
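Both ``ResourceTypes`` and ``AccountSasPermissions`` ultimately reduce to the short flag strings embedded in an account SAS, so they can be built from keyword arguments or parsed back with ``from_string``. A small illustrative round trip, assuming the classes are imported from the ``_shared/models.py`` module removed in this change::

    from azure.multiapi.storagev2.filedatalake.v2020_02_10._shared.models import (
        AccountSasPermissions,
        ResourceTypes,
    )

    # Parse an existing permission string, then regenerate it.
    perms = AccountSasPermissions.from_string('rwl')
    assert perms.read and perms.write and perms.list and not perms.delete
    assert str(perms) == 'rwl'

    # Resource types: service ('s'), container ('c'), object ('o').
    types = ResourceTypes(service=True, object=True)
    assert str(types) == 'so'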
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.filedatalake.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. 
- - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies.py deleted file mode 100644 index c9bc798..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
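The schedule described above (roughly 15, 18 and 24 seconds for the first three retries, plus or minus the jitter range) follows directly from the formula in ``get_backoff_time``; a standalone sketch of the same calculation::

    import random

    def backoff(count, initial_backoff=15, increment_base=3, random_jitter_range=3):
        # Mirrors ExponentialRetry.get_backoff_time for a given retry count.
        base = initial_backoff + (0 if count == 0 else pow(increment_base, count))
        start = base - random_jitter_range if base > random_jitter_range else 0
        return random.Random().uniform(start, base + random_jitter_range)

    # Centres on 15, 18 and 24 seconds for the first three retries.
    print([round(backoff(c)) for c in range(3)])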
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
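Both the synchronous ``StorageResponseHook`` and the ``AsyncStorageResponseHook`` above invoke a user-supplied ``raw_response_hook`` and keep running progress counters on the pipeline context. A minimal sketch of such a hook as a plain callable (the function name, and passing it per operation, are illustrative)::

    def log_progress(response):
        # Counters maintained by StorageResponseHook / AsyncStorageResponseHook.
        current = response.context.get('download_stream_current')
        total = response.context.get('data_stream_total')
        if current is not None:
            print('downloaded {} of {} bytes'.format(current, total))

    # e.g. passed per operation as raw_response_hook=log_progress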
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py deleted file mode 100644 index 37354d7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py +++ /dev/null @@ -1,273 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
<header key>: <header value>
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/response_handlers.py deleted file mode 100644 index 5df2f5c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/response_handlers.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.location_mode, deserialized - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message 
+= "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - error.raise_with_traceback() - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. 
The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py deleted file mode 100644 index 1b619df..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py +++ /dev/null @@ -1,602 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
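# Illustrative sketch, not part of the removed module: how the account SAS
# signature assembled by _SharedAccessHelper.add_account_signature / get_token
# above is typically computed. sign_string() in these shared helpers
# HMAC-SHA256-signs the newline-joined fields with the base64-decoded account
# key and base64-encodes the digest. All names and values below are
# hypothetical, for illustration only.
import base64
import hashlib
import hmac

def _example_sign_string(account_key, string_to_sign):
    # Decode the storage account key, sign the UTF-8 string, re-encode the digest.
    key = base64.b64decode(account_key)
    digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
    return base64.b64encode(digest).decode('utf-8')

# Field order mirrors add_account_signature: account, sp, ss, srt, st, se, sip, spr, sv.
_example_fields = ['myaccount', 'rl', 'b', 'sco', '', '2021-01-01T00:00:00Z', '', 'https', '2019-07-07']
_example_signature = _example_sign_string(
    base64.b64encode(b'dummy-account-key').decode(), '\n'.join(_example_fields) + '\n')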
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if 
parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
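# Note: the Put Block API requires block IDs within a single blob to be
# Base64-encoded strings of equal length, hence the 32-digit zero-padded
# offset below; the doubled encode_base64/url_quote wrapping is likely what
# the TODO above flags as incorrect.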
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): 
- try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be 
corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. - if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. 
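# Note: the wrapped generator is forward-only; tell() and seek() below raise
# UnsupportedOperation, and read() accumulates generator chunks until the
# requested size is reached, keeping any excess in self.leftover.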
- - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' 
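# Illustrative usage sketch, not part of the removed module: IterStreamer
# (imported from .uploads above) wraps a byte generator so the chunked upload
# helpers can call read(size) on it like a file. The generator and sizes below
# are hypothetical.
def _example_generator():
    yield b"hello "
    yield b"world"

_example_stream = IterStreamer(_example_generator())
assert _example_stream.read(5) == b"hello"   # buffers the 6-byte chunk, returns 5 bytes, keeps 1
assert _example_stream.read(6) == b" world"  # leftover byte plus the next chunk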
- - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # 
Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = 
self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared_access_signature.py deleted file mode 100644 index 0808155..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared_access_signature.py +++ /dev/null @@ -1,391 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING - -from azure.multiapi.storagev2.blob.v2020_02_10 import generate_account_sas as generate_blob_account_sas -from azure.multiapi.storagev2.blob.v2020_02_10 import generate_container_sas, generate_blob_sas -if TYPE_CHECKING: - import datetime - from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \ - UserDelegationKey - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the DataLake service. - - Use the returned signature as the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The access key to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - return generate_blob_account_sas( - account_name=account_name, - account_key=account_key, - resource_types=resource_types, - permission=permission, - expiry=expiry, - **kwargs - ) - - -def generate_file_system_sas( - account_name, # type: str - file_system_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSystemSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file system. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. 
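A hypothetical usage sketch for the ``generate_account_sas`` helper defined above; the import path, account name and key are placeholders, and string forms of the resource types and permissions are used for brevity::

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.filedatalake.v2020_02_10 import generate_account_sas

    sas_token = generate_account_sas(
        account_name="<storage-account>",      # placeholder
        account_key="<account-key>",           # placeholder
        resource_types="sco",                  # service, container, object
        permission="rl",                       # read + list
        expiry=datetime.utcnow() + timedelta(hours=1))

    # the token can then be passed as the credential of any DataLake client

Under the hood the helper simply forwards to the blob package's ``generate_account_sas``, as the implementation above shows.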
If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - return generate_container_sas( - account_name=account_name, - container_name=file_system_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) - - -def generate_directory_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a directory. 
- - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - depth = len(directory_name.strip("/").split("/")) - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=directory_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - sdd=depth, - is_directory=True, - **kwargs) - - -def generate_file_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - file_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any BDataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str file_name: - The name of the file. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. 
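A similar sketch for ``generate_directory_sas`` above; note that the implementation derives the directory depth from the path and forwards it as ``sdd`` together with ``is_directory=True``, so the path segments matter. Placeholders as before::

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.filedatalake.v2020_02_10 import generate_directory_sas

    sas_token = generate_directory_sas(
        account_name="<storage-account>",      # placeholder
        file_system_name="my-file-system",
        directory_name="raw/2020/01",          # depth 3 -> sdd=3
        credential="<account-key>",            # or a UserDelegationKey
        permission="rwdl",                     # ordered read, write, delete, list
        expiry=datetime.utcnow() + timedelta(hours=1))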
- Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. This can only be used when to generate sas with delegation key. - :return: A Shared Access Signature (sas) token. 
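And a matching sketch for ``generate_file_sas``; per the implementation that follows, the directory and file names are joined into a single path before delegating to ``generate_blob_sas`` (placeholders as in the previous sketches)::

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.filedatalake.v2020_02_10 import generate_file_sas

    sas_token = generate_file_sas(
        account_name="<storage-account>",
        file_system_name="my-file-system",
        directory_name="raw/2020",
        file_name="data.csv",                  # signed path becomes "raw/2020/data.csv"
        credential="<account-key>",
        permission="rw",
        expiry=datetime.utcnow() + timedelta(hours=1))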
- :rtype: str - """ - if directory_name: - path = directory_name.rstrip('/') + "/" + file_name - else: - path = file_name - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=path, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py deleted file mode 100644 index 6d88c32..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py +++ /dev/null @@ -1,104 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from ._deserialize import ( - process_storage_error) -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import ( - upload_data_chunks, - DataLakeFileChunkUploader, upload_substream_blocks) -from azure.core.exceptions import HttpResponseError - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - file_settings=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - use_original_upload_path = file_settings.use_byte_buffer or \ - validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - upload_data_chunks( - 
service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - else: - upload_substream_blocks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - return client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - close=True, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py deleted file mode 100644 index bf23efa..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.3.1" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/__init__.py deleted file mode 100644 index c24dde8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._download_async import StorageStreamDownloader -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._file_system_client_async import FileSystemClient -from ._data_lake_service_client_async import DataLakeServiceClient -from ._data_lake_lease_async import DataLakeLeaseClient - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeDirectoryClient', - 'DataLakeFileClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py deleted file mode 100644 index 7d0adef..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py +++ /dev/null @@ -1,551 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Any - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore -from azure.core.pipeline import AsyncPipeline -from ._data_lake_file_client_async import DataLakeFileClient -from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase -from .._models import DirectoryProperties, FileProperties -from .._deserialize import deserialize_dir_properties -from ._path_client_async import PathClient -from .._shared.base_client_async import AsyncTransportWrapper - - -class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call - credential=credential, **kwargs) - - async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. 
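As the constructor above shows, a ``DataLakeDirectoryClient`` is addressed by account URL, file system name and the full directory path, and the directory does not have to exist yet. A hypothetical instantiation (the import path, endpoint and credential are placeholders)::

    from azure.multiapi.storagev2.filedatalake.v2020_02_10.aio import DataLakeDirectoryClient

    dir_client = DataLakeDirectoryClient(
        account_url="https://<storage-account>.dfs.core.windows.net",
        file_system_name="my-file-system",
        directory_name="raw/2020",
        credential="<account-key-or-sas-token>")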
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return await self._create('directory', metadata=metadata, **kwargs) - - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._exists(**kwargs) - - async def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
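Putting the pieces together, a sketch of the async directory lifecycle: create with POSIX settings, probe with ``exists`` and clean up, reusing ``dir_client`` from the previous sketch (umask and permissions only take effect on hierarchical-namespace accounts)::

    import asyncio

    async def main():
        created = await dir_client.create_directory(
            metadata={"owner": "ingest"},
            permissions="rwxr-x---",           # symbolic POSIX permissions
            umask="0027")                      # 4-digit octal umask
        print(created)                         # etag / last-modified response headers
        print(await dir_client.exists())       # True
        await dir_client.delete_directory()

    asyncio.run(main())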
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return await self._delete(recursive=True, **kwargs) - - async def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access - - async def rename_directory(self, new_name, # type: str - **kwargs): - # type: (**Any) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. 
- The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. 
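A short fragment for the rename call; ``new_name`` must include the destination file system, and when the destination lives in a different file system under SAS credentials the implementation below expects the destination SAS to be appended to ``new_name`` (otherwise it raises ``ValueError``)::

    # inside an async function, with dir_client pointing at "my-file-system/raw/2020"
    renamed = await dir_client.rename_directory("my-file-system/archive/2020")
    print(renamed.path_name)    # the returned client targets the new path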
- """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new directory") - if not self._raw_credential and new_file_system == self.file_system_name: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = DataLakeDirectoryClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, - credential=self._raw_credential or new_dir_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_directory_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_directory_client - - async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.delete_directory(**kwargs) - return subdir - - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. 
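The ``create_sub_directory``/``delete_sub_directory`` helpers above are thin wrappers that obtain a child client and delegate to ``create_directory``/``delete_directory``. A fragment continuing the earlier sketches::

    # inside an async function
    logs = await dir_client.create_sub_directory("logs")   # returns a DataLakeDirectoryClient
    print(await logs.get_directory_properties())
    await dir_client.delete_sub_directory("logs")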
- :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_file_client] - :end-before: [END bsc_get_file_client] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file.get('name') - except AttributeError: - file_path = self.path_name + '/' + str(file) - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The sub subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_directory_client] - :end-before: [END bsc_get_directory_client] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - subdir_path = sub_directory.get('name') - except AttributeError: - subdir_path = self.path_name + '/' + str(sub_directory) - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py deleted file mode 100644 index df25ecf..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py +++ /dev/null @@ -1,574 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Any -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -from azure.core.exceptions import HttpResponseError -from ._download_async import StorageStreamDownloader -from ._path_client_async import PathClient -from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase -from .._serialize import convert_datetime_to_rfc1123 -from .._deserialize import process_storage_error, deserialize_file_properties -from .._models import FileProperties -from ..aio._upload_helper import upload_datalake_file - - -class DataLakeFileClient(PathClient, DataLakeFileClientBase): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - async def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. 
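For the file client, a hypothetical end-to-end sketch; the import path, endpoint and credential are placeholders, and ``exists`` and ``get_file_properties`` are the methods shown further below in this same module::

    import asyncio
    from azure.multiapi.storagev2.filedatalake.v2020_02_10.aio import DataLakeFileClient

    async def main():
        file_client = DataLakeFileClient(
            account_url="https://<storage-account>.dfs.core.windows.net",
            file_system_name="my-file-system",
            file_path="raw/2020/data.csv",
            credential="<account-key-or-sas-token>")
        await file_client.create_file(metadata={"source": "ingest"})
        print(await file_client.exists())          # True once the file is created
        props = await file_client.get_file_properties()
        print(props.name, props.last_modified)

    asyncio.run(main())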
- When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._exists(**kwargs) - - async def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return await self._delete(**kwargs) - - async def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - return await self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access - - async def set_file_expiry(self, expiry_options, # type: str - expires_on=None, # type: Optional[Union[datetime, int]] - **kwargs): - # type: (str, Optional[Union[datetime, int]], **Any) -> None - """Sets the time a file will expire and be deleted. - - :param str expiry_options: - Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' - :param datetime or int expires_on: - The time to set the file to expiry. 
- When expiry_options is RelativeTo*, expires_on should be an int in milliseconds - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - expires_on = convert_datetime_to_rfc1123(expires_on) - except AttributeError: - expires_on = str(expires_on) - await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on, - **kwargs) # pylint: disable=protected-access - - async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return await upload_datalake_file(**options) - - async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data, - offset, - length=length, - **kwargs) - try: - return await self._client.path.append_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. - - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. 
This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 12 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return await self._client.path.flush_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - async def rename_file(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_file_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_file_sas = self._query_str.strip('?') - - new_file_client = DataLakeFileClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, - credential=self._raw_credential or new_file_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_file_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_file_client diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_lease_async.py deleted file mode 100644 index 53e3255..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_lease_async.py +++ /dev/null @@ -1,243 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
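Taken together, the DataLakeFileClient methods above follow a create / append / flush / download flow: append_data only stages bytes at an offset, and flush_data is what commits them. A minimal sketch of that flow, under the same assumptions as before (placeholder account URL, credential, and paths; the import path is assumed to mirror the upstream azure.storage.filedatalake.aio exports):

.. code-block:: python

    import asyncio

    # Assumed import path mirroring azure.storage.filedatalake.aio.
    from azure.multiapi.storagev2.filedatalake.v2021_08_06.aio import DataLakeFileClient


    async def main():
        # Placeholder account URL, credential, and path.
        file_client = DataLakeFileClient(
            account_url="https://<account-name>.dfs.core.windows.net",
            file_system_name="my-file-system",
            file_path="my-directory/report.csv",
            credential="<sas-token-or-account-key>")
        async with file_client:
            data = b"col1,col2\n1,2\n"
            await file_client.create_file()
            # append_data stages the bytes at the given offset ...
            await file_client.append_data(data, offset=0, length=len(data))
            # ... and flush_data commits everything up to that length.
            await file_client.flush_data(len(data))
            # download_file returns a StorageStreamDownloader.
            downloader = await file_client.download_file()
            assert await downloader.readall() == data


    asyncio.run(main())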
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobLeaseClient -from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase - - -if TYPE_CHECKING: - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(DataLakeLeaseClientBase): - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.aio.FileSystemClient or - ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - super(DataLakeLeaseClient, self).__init__(client, lease_id) - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. 
If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_service_client_async.py deleted file mode 100644 index 23fc2eb..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_service_client_async.py +++ /dev/null @@ -1,443 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Any - -from azure.core.paging import ItemPaged -from azure.core.pipeline import AsyncPipeline - -from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobServiceClient -from .._generated.aio import AzureDataLakeStorageRESTAPI -from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin -from ._file_system_client_async import FileSystemClient -from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase -from .._shared.policies_async import ExponentialRetry -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_file_client_async import DataLakeFileClient -from ._models import FileSystemPropertiesPaged -from .._models import UserDelegationKey, LocationMode - - -class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. 
file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(DataLakeServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs - ) - self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - async def __aenter__(self): - await self._blob_service_client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._blob_service_client.close() - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_service_client.close() - - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. 
- """ - delegation_key = await self._blob_service_client.get_user_delegation_key( - key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool include_deleted: - Specifies that deleted file systems to be returned in the response. This is for file system restore enabled - account. The default value is `False`. - .. versionadded:: 12.3.0 - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. - """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - async def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. 
- """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - async def _rename_file_system(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str name: - The name of the filesystem to rename. - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - await self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = self.get_file_system_client(new_name) - return renamed_file_system - - async def undelete_file_system(self, name, deleted_version, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Restores soft-deleted filesystem. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.3.0 - This operation was introduced in API version '2019-12-12'. - - :param str name: - Specifies the name of the deleted filesystem to restore. - :param str deleted_version: - Specifies the version of the deleted filesystem to restore. - :keyword str new_name: - The new name for the deleted filesystem to be restored to. - If not specified "name" will be used as the restored filesystem name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - new_name = kwargs.pop('new_name', None) - await self._blob_service_client.undelete_container(name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access - file_system = self.get_file_system_client(new_name or name) - return file_system - - async def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - _configuration=self._config, - _pipeline=self._pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py deleted file mode 100644 index 5685478..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
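The service client above delegates account-level operations (create, list, delete, undelete) to an internal BlobServiceClient, while get_file_system_client, get_directory_client, and get_file_client only construct sub-clients that reuse its settings. A hedged sketch of typical usage follows; the account URL and names are placeholders, DefaultAzureCredential is just one of the credential types the docstring allows, and the import path is again an assumption.

.. code-block:: python

    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    # Assumed import path mirroring azure.storage.filedatalake.aio.
    from azure.multiapi.storagev2.filedatalake.v2021_08_06.aio import DataLakeServiceClient


    async def main():
        # Placeholder account URL; any credential type listed in the docstring works.
        credential = DefaultAzureCredential()
        service_client = DataLakeServiceClient(
            account_url="https://<account-name>.dfs.core.windows.net",
            credential=credential)
        async with service_client:
            await service_client.create_file_system("demo-fs")
            async for file_system in service_client.list_file_systems(name_starts_with="demo"):
                print(file_system.name)
            # Sub-clients are built locally from the service client's settings.
            directory_client = service_client.get_directory_client("demo-fs", "raw")
            file_client = service_client.get_file_client("demo-fs", "raw/data.json")
            print(directory_client.path_name, file_client.path_name)
            await service_client.delete_file_system("demo-fs")
        await credential.close()


    asyncio.run(main())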
-# -------------------------------------------------------------------------- -from typing import AsyncIterator - -from .._deserialize import from_blob_properties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the file. - """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - """ - return self._downloader.chunks() - - async def readall(self): - """Download the contents of this file. - - This operation blocks until all data is downloaded. - :rtype: bytes or str - """ - return await self._downloader.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_file_system_client_async.py deleted file mode 100644 index ea0ddc9..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_file_system_client_async.py +++ /dev/null @@ -1,790 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information.
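Since StorageStreamDownloader wraps the underlying blob downloader, the practical choice is between readall() for small payloads, readinto() for streaming into a writable file-like object, and chunks() for iterating the content incrementally. A small sketch (placeholder connection details; the remote file is assumed to already exist):

.. code-block:: python

    import asyncio

    # Assumed import path mirroring azure.storage.filedatalake.aio.
    from azure.multiapi.storagev2.filedatalake.v2021_08_06.aio import DataLakeFileClient


    async def main():
        # Placeholder connection details; "my-directory/report.csv" is assumed to exist.
        file_client = DataLakeFileClient(
            account_url="https://<account-name>.dfs.core.windows.net",
            file_system_name="my-file-system",
            file_path="my-directory/report.csv",
            credential="<sas-token-or-account-key>")
        async with file_client:
            # Stream the whole file into a local file without buffering it all in memory.
            downloader = await file_client.download_file()
            with open("report.csv", "wb") as stream:
                written = await downloader.readinto(stream)
            print(f"wrote {written} of {downloader.size} bytes")

            # Or iterate the content chunk by chunk.
            downloader = await file_client.download_file()
            async for chunk in downloader.chunks():
                print(len(chunk))


    asyncio.run(main())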
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace - -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncItemPaged - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.multiapi.storagev2.blob.v2020_02_10.aio import ContainerClient - -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_lease_async import DataLakeLeaseClient -from .._deserialize import deserialize_path_properties -from .._file_system_client import FileSystemClient as FileSystemClientBase -from .._generated.aio import AzureDataLakeStorageRESTAPI -from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._models import FileSystemProperties, PublicAccess, DirectoryProperties, FileProperties - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings) - - -class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase): - """A client to interact with a specific file system, even if that file system - may not yet exist. - - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(FileSystemClient, self).__init__( - account_url, - file_system_name=file_system_name, - credential=credential, - **kwargs) - # to override the class field _container_client sync version - kwargs.pop('_hosts', None) - self._container_client = ContainerClient(self._blob_account_url, file_system_name, - credential=credential, - _hosts=self._container_client._hosts,# pylint: disable=protected-access - **kwargs) # type: ignore # pylint: disable=protected-access - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._container_client.close() - await super(FileSystemClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._container_client.close() - await self.__aexit__() - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the file_system. 
- """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 16 - :caption: Creating a file system in the datalake service. - """ - return await self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file system exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._container_client.exists(**kwargs) - - @distributed_trace_async - async def _rename_file_system(self, new_name, **kwargs): - # type: (str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - await self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = FileSystemClient( - "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - return renamed_file_system - - @distributed_trace_async - async def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. 
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 16 - :caption: Deleting a file system in the datalake service. - """ - await self._container_client.delete_container(**kwargs) - - @distributed_trace_async - async def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the file system. - """ - container_properties = await self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - @distributed_trace_async - async def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. 
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - return await self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - @distributed_trace_async - async def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
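A sketch of reading properties and replacing metadata with the calls above; it assumes an already-constructed FileSystemClient like the one in the previous sketch, and the attribute names shown are the usual FileSystemProperties fields::

    from azure.storage.filedatalake.aio import FileSystemClient

    async def refresh_metadata(fs: FileSystemClient) -> None:
        props = await fs.get_file_system_properties()
        print(props.name, props.last_modified)
        # Each call replaces all metadata currently on the file system.
        await fs.set_file_system_metadata({"category": "test", "owner": "data-team"})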
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return await self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - @distributed_trace_async - async def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, get_file_system_access_policy only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_policy = await self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - @distributed_trace - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> AsyncItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: - An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 12 - :caption: List the blobs in the file system. - """ - timeout = kwargs.pop('timeout', None) - return self._client.file_system.list_paths( - recursive=recursive, - max_results=max_results, - path=path, - timeout=timeout, - cls=deserialize_path_properties, - **kwargs) - - @distributed_trace_async - async def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) 
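Because get_paths returns an async paged iterator, listing is a plain async-for loop; another hedged fragment against an existing client, with a hypothetical path prefix and the usual PathProperties attribute names::

    from azure.storage.filedatalake.aio import FileSystemClient

    async def list_paths(fs: FileSystemClient, prefix: str = "raw") -> None:
        # Recursively walks files and directories under 'prefix'.
        async for path in fs.get_paths(path=prefix, recursive=True):
            kind = "<dir>" if path.is_directory else path.content_length
            print(path.name, kind)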
-> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - @distributed_trace_async - async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) 
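The directory helper above returns the client for the new directory, so it can be used immediately; the directory name and metadata here are placeholders::

    from azure.storage.filedatalake.aio import FileSystemClient

    async def make_directory(fs: FileSystemClient) -> None:
        dir_client = await fs.create_directory("raw/2020/02", metadata={"source": "ingest"})
        print(dir_client.url)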
-> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.delete_directory(**kwargs) - return directory_client - - @distributed_trace_async - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). 
- :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 12 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - @distributed_trace_async - async def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 12 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.delete_file(**kwargs) - return file_client - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.get('name') - except AttributeError: - directory_name = str(directory) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, - loop=self._loop - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. 
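create_file and delete_file work the same way, taking either a path string or a FileProperties instance; a small sketch with a hypothetical path::

    from azure.storage.filedatalake.aio import FileSystemClient

    async def create_and_remove_file(fs: FileSystemClient) -> None:
        # create_file returns a DataLakeFileClient for the new, empty file.
        file_client = await fs.create_file("raw/2020/02/readings.csv")
        print(file_client.url)
        # The same path string can later be handed to delete_file.
        await fs.delete_file("raw/2020/02/readings.csv")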
- """ - try: - file_path = file_path.get('name') - except AttributeError: - file_path = str(file_path) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_models.py deleted file mode 100644 index 648d134..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_models.py +++ /dev/null @@ -1,41 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from azure.multiapi.storagev2.blob.v2020_02_10.aio._models import ContainerPropertiesPaged -from .._models import FileSystemProperties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_path_client_async.py deleted file mode 100644 index 0a8ee4f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_path_client_async.py +++ /dev/null @@ -1,726 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Any, Dict - -from azure.core.exceptions import AzureError, HttpResponseError -from azure.multiapi.storagev2.blob.v2020_02_10.aio import BlobClient -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._path_client import PathClient as PathClientBase -from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \ - AccessControlChangeCounters, AccessControlChanges -from .._generated.aio import AzureDataLakeStorageRESTAPI -from ._data_lake_lease_async import DataLakeLeaseClient -from .._deserialize import process_storage_error -from .._shared.policies_async import ExponentialRetry - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(AsyncStorageAccountHostsMixin, PathClientBase): - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - - super(PathClient, self).__init__(account_url, # pylint: disable=specify-parameter-names-in-call - file_system_name, path_name, - credential=credential, - **kwargs) # type: ignore - - kwargs.pop('_hosts', None) - - self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=file_system_name, - blob_name=path_name, - credential=credential, - _hosts=self._blob_client._hosts, # pylint: disable=protected-access - **kwargs) - - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._blob_client.url, - file_system=file_system_name, - path=path_name, - pipeline=self._pipeline) - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._blob_client.close() - await super(PathClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_client.close() - await self.__aexit__() - - async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. 
- The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def _delete(self, **kwargs): - # type: (bool, **Any) -> None - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
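The umask rule quoted in the docstring above ("the resulting permission is given by p & ^u") is ordinary octal bit arithmetic; the documented example checks out directly::

    p, u = 0o777, 0o057
    assert (p & ~u) == 0o720   # 0777 masked by umask 0057 leaves 0720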
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return await self._client.path.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). - """ - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return await self._client.path.set_access_control(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Get the owner, group, permissions, or access control list for a path. 
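A hedged sketch of the per-path ACL calls documented here, via the public DataLakeDirectoryClient (which shares this PathClient base); the owner object ID is a placeholder, and the returned dictionary keys are assumed to follow the x-ms-owner/x-ms-permissions/x-ms-acl headers::

    from azure.storage.filedatalake.aio import DataLakeDirectoryClient

    async def set_owner_and_mode(dir_client: DataLakeDirectoryClient) -> None:
        # permissions and acl are mutually exclusive; this call uses 4-digit octal permissions.
        await dir_client.set_access_control(owner="<owner-object-id>", permissions="0750")
        acl = await dir_client.get_access_control(upn=True)
        print(acl["owner"], acl["permissions"], acl["acl"])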
- - :param upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. - """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return await self._client.path.get_properties(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def set_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Sets the Access Control on a path and sub-paths. - - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. 
- :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def update_access_control_recursive(self, acl, **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Modifies the Access Control on a path and sub-paths. - - :param acl: - Modifies POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single, - change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. 
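And a sketch of the recursive variant, wiring up a progress_hook and keeping the continuation token so an interrupted run can be resumed; the directory client and ACL string are placeholders::

    from azure.core.exceptions import AzureError
    from azure.storage.filedatalake.aio import DataLakeDirectoryClient

    async def grant_group_read(dir_client: DataLakeDirectoryClient) -> None:
        async def report(changes):
            # 'changes' is an AccessControlChanges with batch and aggregate counters.
            print("files updated so far:", changes.aggregate_counters.files_successful)

        try:
            result = await dir_client.set_access_control_recursive(
                acl="group::r-x", progress_hook=report, batch_size=2000)
            print("failures:", result.counters.failure_count)
        except AzureError as error:
            # The operation can be resumed later via this token.
            print("interrupted, resume token:", error.continuation_token)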
- :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def remove_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Removes the Access Control on a path and sub-paths. - - :param acl: - Removes POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, and a user or - group identifier in the format "[scope:][type]:[id]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. 
- """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def _set_access_control_internal(self, options, progress_hook, max_batches=None): - try: - continue_on_failure = options.get('force_flag') - total_directories_successful = 0 - total_files_success = 0 - total_failure_count = 0 - batch_count = 0 - last_continuation_token = None - current_continuation_token = None - continue_operation = True - while continue_operation: - headers, resp = await self._client.path.set_access_control_recursive(**options) - - # make a running tally so that we can report the final results - total_directories_successful += resp.directories_successful - total_files_success += resp.files_successful - total_failure_count += resp.failure_count - batch_count += 1 - current_continuation_token = headers['continuation'] - - if current_continuation_token is not None: - last_continuation_token = current_continuation_token - - if progress_hook is not None: - await progress_hook(AccessControlChanges( - batch_counters=AccessControlChangeCounters( - directories_successful=resp.directories_successful, - files_successful=resp.files_successful, - failure_count=resp.failure_count, - ), - aggregate_counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count, - ), - batch_failures=[AccessControlChangeFailure( - name=failure.name, - is_directory=failure.type == 'DIRECTORY', - error_message=failure.error_message) for failure in resp.failed_entries], - continuation=last_continuation_token)) - - # update the continuation token, if there are more operations that cannot be completed in a single call - max_batches_satisfied = (max_batches is not None and batch_count == max_batches) - continue_operation = bool(current_continuation_token) and not max_batches_satisfied - options['continuation'] = current_continuation_token - - # currently the service stops on any failure, so we should send back the last continuation token - # for the user to retry the failed updates - # otherwise we should just return what the service gave us - return AccessControlChangeResult(counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count), - continuation=last_continuation_token - if total_failure_count > 0 and not continue_on_failure else current_continuation_token) - except HttpResponseError as error: - error.continuation_token = last_continuation_token - process_storage_error(error) - except AzureError as error: - error.continuation_token = last_continuation_token - raise error - - async def _rename_path(self, rename_source, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. 
If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return await self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - """ - path_properties = await self._blob_client.get_blob_properties(**kwargs) - return path_properties - - async def _exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a path exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._blob_client.exists(**kwargs) - - async def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). 
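A minimal sketch of the metadata call above under an ETag precondition; the client and etag values are placeholders, and MatchConditions comes from azure-core.

from azure.core import MatchConditions

async def tag_path(directory_client, etag):
    # Replaces all existing metadata on the path, but only if it is unchanged since `etag` was read.
    return await directory_client.set_metadata(
        {"category": "test"},
        etag=etag,
        match_condition=MatchConditions.IfNotModified,
    )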
- """ - return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - async def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - async def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_file_system_samples.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file_system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py deleted file mode 100644 index 00d5bf1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py +++ /dev/null @@ -1,103 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from azure.core.exceptions import HttpResponseError -from .._deserialize import ( - process_storage_error) -from .._shared.response_handlers import return_response_headers -from .._shared.uploads_async import ( - upload_data_chunks, - DataLakeFileChunkUploader, upload_substream_blocks) - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -async def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - file_settings=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = await client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this 
modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - use_original_upload_path = file_settings.use_byte_buffer or \ - validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - await upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - else: - await upload_substream_blocks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - return await client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - close=True, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/__init__.py deleted file mode 100644 index 99d2ef7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/__init__.py +++ /dev/null @@ -1,105 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from ._download import StorageStreamDownloader -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._file_system_client import FileSystemClient -from ._data_lake_service_client import DataLakeServiceClient -from ._data_lake_lease import DataLakeLeaseClient -from ._models import ( - LocationMode, - ResourceTypes, - FileSystemProperties, - FileSystemPropertiesPaged, - DirectoryProperties, - FileProperties, - PathProperties, - LeaseProperties, - ContentSettings, - AccountSasPermissions, - FileSystemSasPermissions, - DirectorySasPermissions, - FileSasPermissions, - UserDelegationKey, - PublicAccess, - AccessPolicy, - DelimitedTextDialect, - DelimitedJsonDialect, - ArrowDialect, - ArrowType, - QuickQueryDialect, - DataLakeFileQueryError, - AccessControlChangeResult, - AccessControlChangeCounters, - AccessControlChangeFailure, - AccessControlChanges, - AnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - DeletedPathProperties -) - -from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \ - generate_file_sas - -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import StorageErrorCode -from ._version import VERSION - -__version__ = VERSION - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeFileClient', - 'DataLakeDirectoryClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'PublicAccess', - 'AccessPolicy', - 'ResourceTypes', - 'StorageErrorCode', - 'UserDelegationKey', - 'FileSystemProperties', - 'FileSystemPropertiesPaged', - 'DirectoryProperties', - 'FileProperties', - 'PathProperties', - 'LeaseProperties', - 'ContentSettings', - 'AccessControlChangeResult', - 'AccessControlChangeCounters', - 'AccessControlChangeFailure', - 'AccessControlChanges', - 'AccountSasPermissions', - 'FileSystemSasPermissions', - 'DirectorySasPermissions', - 'FileSasPermissions', - 'generate_account_sas', - 'generate_file_system_sas', - 'generate_directory_sas', - 'generate_file_sas', - 'VERSION', - 'StorageStreamDownloader', - 'DelimitedTextDialect', - 'DelimitedJsonDialect', - 'DataLakeFileQueryError', - 'ArrowDialect', - 'ArrowType', - 'QuickQueryDialect', - 'DataLakeFileQueryError', - 'AnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'DeletedPathProperties' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_directory_client.py deleted file mode 100644 index 042fa05..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_directory_client.py +++ /dev/null @@ -1,565 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from typing import Any - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore -from azure.core.pipeline import Pipeline -from ._deserialize import deserialize_dir_properties -from ._shared.base_client import TransportWrapper, parse_connection_str -from ._data_lake_file_client import DataLakeFileClient -from ._models import DirectoryProperties, FileProperties -from ._path_client import PathClient - - -class DataLakeDirectoryClient(PathClient): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> DataLakeDirectoryClient - """ - Create DataLakeDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: - The name of file system to interact with. - :type file_system_name: str - :param directory_name: - The name of directory to interact with. The directory is under file system. - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. 
The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, and account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeDirectoryClient - :rtype ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, directory_name=directory_name, - credential=credential, **kwargs) - - def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return self._create('directory', metadata=metadata, **kwargs) - - def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return self._delete(recursive=True, **kwargs) - - def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
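For illustration, a minimal sketch of creating, inspecting, and removing a directory with the client documented above, assuming the public azure-storage-file-datalake package mirrors this vendored module; the connection string and all names are placeholders.

from azure.storage.filedatalake import DataLakeDirectoryClient  # assumed public equivalent

directory = DataLakeDirectoryClient.from_connection_string(
    "<my-connection-string>",          # placeholder
    file_system_name="my-filesystem",
    directory_name="raw/2024",
)
# umask and permissions only apply on accounts with Hierarchical Namespace enabled.
directory.create_directory(metadata={"stage": "raw"}, umask="0027", permissions="0755")
props = directory.get_directory_properties()
print(props.last_modified)
directory.delete_directory()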
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - return self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._exists(**kwargs) - - def rename_directory(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = DataLakeDirectoryClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, - credential=self._raw_credential or new_dir_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - new_directory_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_directory_client - - def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. 
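A minimal sketch of the rename operation above, continuing with the hypothetical directory client from the previous sketch; the target path is a placeholder.

# The new name is addressed as "{filesystem}/{directory path}". When only a SAS
# credential is held and the target file system differs, a SAS for the target
# must be appended to the new name.
renamed = directory.rename_directory("my-filesystem/curated/2024")
print(renamed.url)  # client now points at the renamed directory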
Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
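A minimal sketch of creating and deleting a subdirectory with the calls documented above; the names are placeholders and `directory` is the hypothetical client from the earlier sketch.

staging = directory.create_sub_directory("staging", metadata={"stage": "staging"})
print(staging.url)                      # client for the newly created sub-path
directory.delete_sub_directory("staging")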
- :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.delete_directory(**kwargs) - return subdir - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. 
This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - """ - try: - file_path = file.get('name') - except AttributeError: - file_path = self.path_name + '/' + str(file) - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The sub subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - try: - subdir_path = sub_directory.get('name') - except AttributeError: - subdir_path = self.path_name + '/' + str(sub_directory) - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_file_client.py deleted file mode 100644 index fe074b3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_file_client.py +++ /dev/null @@ -1,781 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from io import BytesIO -from typing import Any - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core.exceptions import HttpResponseError -from ._quick_query_helper import DataLakeFileQueryReader -from ._shared.base_client import parse_connection_str -from ._shared.request_handlers import get_length, read_length -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import IterStreamer -from ._upload_helper import upload_datalake_file -from ._download import StorageStreamDownloader -from ._path_client import PathClient -from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \ - convert_datetime_to_rfc1123 -from ._deserialize import process_storage_error, deserialize_file_properties -from ._models import FileProperties, DataLakeFileQueryError - - -class DataLakeFileClient(PathClient): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> DataLakeFileClient - """ - Create DataLakeFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param directory_name: The name of directory to interact with. The directory is under file system. - :type directory_name: str - :param file_name: The name of file to interact with. 
The file is under directory. - :type file_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeFileClient - :rtype ~azure.storage.filedatalake.DataLakeFileClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, file_path=file_path, - credential=credential, **kwargs) - - def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. 
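For illustration, a minimal sketch of constructing a file client and creating a file as documented above, assuming the public azure-storage-file-datalake package mirrors this vendored module; the connection string, paths, and settings are placeholders.

from azure.storage.filedatalake import ContentSettings, DataLakeFileClient  # assumed public equivalent

file_client = DataLakeFileClient.from_connection_string(
    "<my-connection-string>",           # placeholder
    file_system_name="my-filesystem",
    file_path="raw/2024/report.csv",
)
file_client.create_file(
    content_settings=ContentSettings(content_type="text/csv"),
    metadata={"source": "nightly-job"},
    umask="0027",
    permissions="0644",
)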
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return self._delete(**kwargs) - - def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access - - def set_file_expiry(self, expiry_options, # type: str - expires_on=None, # type: Optional[Union[datetime, int]] - **kwargs): - # type: (str, Optional[Union[datetime, int]], **Any) -> None - """Sets the time a file will expire and be deleted. - - :param str expiry_options: - Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' - :param datetime or int expires_on: - The time to set the file to expiry. - When expiry_options is RelativeTo*, expires_on should be an int in milliseconds. - If the type of expires_on is datetime, it should be in UTC time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - expires_on = convert_datetime_to_rfc1123(expires_on) - except AttributeError: - expires_on = str(expires_on) - self._datalake_client_for_blob_operation.path \ - .set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access - - def _upload_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - - kwargs['properties'] = add_metadata_headers(metadata) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_mod_conditions(kwargs) - - if content_settings: - kwargs['path_http_headers'] = get_path_http_headers(content_settings) - - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['validate_content'] = validate_content - kwargs['max_concurrency'] = max_concurrency - kwargs['client'] = self._client.path - kwargs['file_settings'] = self._config - - return kwargs - - def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. 
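# --- Illustrative sketch, not part of the deleted module: the two shapes of set_file_expiry
# described above. file_client is assumed to be a DataLakeFileClient built as in the earlier
# sketch; the expiry values are placeholders.
from datetime import datetime, timedelta

# Relative modes take the expiry as an int number of milliseconds.
file_client.set_file_expiry("RelativeToNow", expires_on=60 * 60 * 1000)  # one hour from now

# Absolute mode takes a UTC datetime instead.
file_client.set_file_expiry("Absolute", expires_on=datetime.utcnow() + timedelta(days=7))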
- :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). 
- """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return upload_datalake_file(**options) - - @staticmethod - def _append_data_options(data, offset, length=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'body': data, - 'position': offset, - 'content_length': length, - 'lease_access_conditions': access_conditions, - 'validate_content': kwargs.pop('validate_content', False), - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data, - offset, - length=length, - **kwargs) - try: - return self._client.path.append_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'position': offset, - 'content_length': 0, - 'path_http_headers': path_http_headers, - 'retain_uncommitted_data': retain_uncommitted_data, - 'close': kwargs.pop('close', False), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. 
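# --- Illustrative sketch, not part of the deleted module: a one-shot upload with upload_data
# as documented above. file_client is assumed to be a DataLakeFileClient built as in the
# earlier sketch; the payload and chunk size are placeholders.
payload = b"hello, datalake\n" * 1024

# overwrite=True replaces any existing file; chunk_size/max_concurrency tune chunked uploads.
response = file_client.upload_data(
    payload, overwrite=True, chunk_size=4 * 1024 * 1024, max_concurrency=2)
print(response.get("etag"), response.get("last_modified"))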
- - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 8 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return self._client.path.flush_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. 
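# --- Illustrative sketch, not part of the deleted module: the append/flush pattern documented
# above: append raw bytes at a position, then flush at the final offset to commit them.
# file_client is assumed to point at a file that was already created.
chunk1 = b"first part,"
chunk2 = b"second part"

file_client.append_data(chunk1, offset=0, length=len(chunk1))
file_client.append_data(chunk2, offset=len(chunk1), length=len(chunk2))

# offset here is the total length of the file once the appended data is committed.
file_client.flush_data(offset=len(chunk1) + len(chunk2))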
Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file exists and returns False otherwise. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._exists(**kwargs) - - def rename_file(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user wants to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string.
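# --- Illustrative sketch, not part of the deleted module: probing with exists and reading the
# file back with download_file as documented above. file_client is assumed to be a
# DataLakeFileClient built as in the earlier sketch.
if file_client.exists():
    downloader = file_client.download_file()
    content = downloader.readall()  # the whole payload as bytes

    # Alternatively, stream a byte range straight into a local file.
    with open("local-copy.bin", "wb") as handle:
        file_client.download_file(offset=0, length=1024).readinto(handle)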
- :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. 
- """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_file_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_file_sas = self._query_str.strip('?') - - new_file_client = DataLakeFileClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, - credential=self._raw_credential or new_file_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - new_file_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_file_client - - def query_file(self, query_expression, **kwargs): - # type: (str, **Any) -> DataLakeFileQueryReader - """ - Enables users to select/project on datalake file data by providing simple query expressions. - This operations returns a DataLakeFileQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - eg. Select * from DataLakeStorage - :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword file_format: - Optional. Defines the serialization of the data currently stored in the file. The default is to - treat the file data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum). - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string. - :paramtype file_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect or - ~azure.storage.filedatalake.QuickQueryDialect or str - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the file. By providing an output format, - the file data will be reformatted according to that profile. - This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect. - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string. - :paramtype output_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect - or list[~azure.storage.filedatalake.ArrowDialect] or ~azure.storage.filedatalake.QuickQueryDialect or str - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
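# --- Illustrative sketch, not part of the deleted module: rename_file as documented above.
# The new name is prefixed with the target file system, and the returned client points at the
# renamed path. Names are placeholders.
renamed_client = file_client.rename_file("my-filesystem/archive/my-file.txt")
print(renamed_client.path_name)  # "archive/my-file.txt"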
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (DataLakeFileQueryReader) - :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on datalake file data by providing simple query expressions. - """ - query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage") - blob_quick_query_reader = self._blob_client.query_blob(query_expression, - blob_format=kwargs.pop('file_format', None), - error_cls=DataLakeFileQueryError, - **kwargs) - return DataLakeFileQueryReader(blob_quick_query_reader) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_service_client.py deleted file mode 100644 index d46af1f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_service_client.py +++ /dev/null @@ -1,560 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Optional, Dict, Any - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline - -from azure.multiapi.storagev2.blob.v2020_06_12 import BlobServiceClient -from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str -from ._deserialize import get_datalake_service_properties -from ._file_system_client import FileSystemClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_file_client import DataLakeFileClient -from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode -from ._serialize import convert_dfs_url_to_blob_url, get_api_version -from ._generated import AzureDataLakeStorageRESTAPI - - -class DataLakeServiceClient(StorageAccountHostsMixin): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. 
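# --- Illustrative sketch, not part of the deleted module: query_file as documented above,
# projecting CSV data with a SQL-like expression. file_client is assumed to point at a CSV
# file; the dialect settings and expression are placeholders.
from azure.storage.filedatalake import DelimitedTextDialect

input_format = DelimitedTextDialect(delimiter=",", quotechar='"', has_header=True)
reader = file_client.query_file(
    "SELECT * from DataLakeStorage",
    file_format=input_format,
    on_error=lambda error: print("query error:", error))
print(reader.readall())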
- For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - - _, sas_token = parse_query(parsed_url.query) - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs', - credential=self._raw_credential, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - - self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) #pylint: disable=protected-access - - def __enter__(self): - self._blob_service_client.__enter__() - return self - - def __exit__(self, *args): - self._blob_service_client.close() - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. 
- """ - self._blob_service_client.close() - - def _format_url(self, hostname): - """Format the endpoint URL according to hostname - """ - formated_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str) - return formated_url - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> DataLakeServiceClient - """ - Create DataLakeServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeServiceClient - :rtype ~azure.storage.filedatalake.DataLakeServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_data_lake_service_client_from_conn_str] - :end-before: [END create_data_lake_service_client_from_conn_str] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from a connection string. - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls(account_url, credential=credential, **kwargs) - - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. - """ - delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. 
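# --- Illustrative sketch, not part of the deleted module: building a DataLakeServiceClient and
# requesting a user delegation key as documented above. The account URL is a placeholder, and
# get_user_delegation_key requires a token credential (for example
# azure.identity.DefaultAzureCredential) rather than an account key or SAS.
from datetime import datetime, timedelta
from azure.identity import DefaultAzureCredential
from azure.storage.filedatalake import DataLakeServiceClient

service_client = DataLakeServiceClient(
    account_url="https://<my-account>.dfs.core.windows.net",
    credential=DefaultAzureCredential())
# DataLakeServiceClient.from_connection_string("<connection-string>") is the shared-key route.

delegation_key = service_client.get_user_delegation_key(
    key_start_time=datetime.utcnow(),
    key_expiry_time=datetime.utcnow() + timedelta(hours=1))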
- The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify, the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool include_deleted: - Specifies that deleted file systems be returned in the response. This is only for file system restore enabled - accounts. The default value is `False`. - .. versionadded:: 12.3.0 - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. - """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - def _rename_file_system(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str name: - The name of the filesystem to rename. - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds.
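# --- Illustrative sketch, not part of the deleted module: listing and creating file systems as
# documented above. service_client is assumed to be the DataLakeServiceClient from the earlier
# sketch; names and metadata are placeholders.
for fs in service_client.list_file_systems(name_starts_with="logs", include_metadata=True):
    print(fs.name, fs.metadata)

# create_file_system returns a FileSystemClient bound to the newly created file system.
fs_client = service_client.create_file_system("logs-2024", metadata={"owner": "ops"})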
- :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = self.get_file_system_client(new_name) - return renamed_file_system - - def undelete_file_system(self, name, deleted_version, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Restores soft-deleted filesystem. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.3.0 - This operation was introduced in API version '2019-12-12'. - - :param str name: - Specifies the name of the deleted filesystem to restore. - :param str deleted_version: - Specifies the version of the deleted filesystem to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - new_name = kwargs.pop('new_name', None) - file_system = self.get_file_system_client(new_name or name) - self._blob_service_client.undelete_container( - name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access - return file_system - - def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. 
- """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, - _pipeline=_pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. 
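# --- Illustrative sketch, not part of the deleted module: deleting a file system and getting a
# FileSystemClient as documented above. service_client is assumed to be the
# DataLakeServiceClient from the earlier sketch; names are placeholders.
service_client.delete_file_system("logs-2023")

# The client can be created whether or not the file system exists yet.
fs_client = service_client.get_file_system_client("logs-2024")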
- """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def set_service_properties(self, **kwargs): - # type: (**Any) -> None - """Sets the properties of a storage account's Datalake service, including - Azure Storage Analytics. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :keyword analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging - :keyword hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates. - :type hour_metrics: ~azure.storage.filedatalake.Metrics - :keyword minute_metrics: - The minute metrics settings provide request statistics - for each minute. 
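# --- Illustrative sketch, not part of the deleted module: the directory and file client
# factories documented above. service_client is assumed to be the DataLakeServiceClient from
# the earlier sketch; file system and path names are placeholders.
dir_client = service_client.get_directory_client("logs-2024", "raw/2024-01-01")
file_client = service_client.get_file_client("logs-2024", "raw/2024-01-01/events.json")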
- :type minute_metrics: ~azure.storage.filedatalake.Metrics - :keyword cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.filedatalake.CorsRule] - :keyword str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :keyword delete_retention_policy: - The delete retention policy specifies whether to retain deleted files/directories. - It also specifies the number of days and versions of file/directory to keep. - :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy - :keyword static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.filedatalake.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - return self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access - - def get_service_properties(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Gets the properties of a storage account's datalake service, including - Azure Storage Analytics. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing datalake service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - """ - props = self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access - return get_datalake_service_properties(props) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_deserialize.py deleted file mode 100644 index a323995..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_deserialize.py +++ /dev/null @@ -1,212 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
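# --- Illustrative sketch, not part of the deleted modules: configuring and reading service-level
# properties as documented above. service_client is assumed to be the DataLakeServiceClient from
# the earlier sketch; the retention window is a placeholder.
from azure.storage.filedatalake import RetentionPolicy

service_client.set_service_properties(
    delete_retention_policy=RetentionPolicy(enabled=True, days=7))

props = service_client.get_service_properties()
print(props["delete_retention_policy"].days)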
-# -------------------------------------------------------------------------- -import logging -from typing import ( # pylint: disable=unused-import - TYPE_CHECKING -) -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \ - ResourceNotFoundError, ResourceExistsError -from ._models import FileProperties, DirectoryProperties, LeaseProperties, DeletedPathProperties, StaticWebsite, \ - RetentionPolicy, Metrics, AnalyticsLogging, PathProperties # pylint: disable=protected-access -from ._shared.models import StorageErrorCode - -if TYPE_CHECKING: - pass - -_LOGGER = logging.getLogger(__name__) - - -def deserialize_dir_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - dir_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return dir_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_path_properties(path_list): - return [PathProperties._from_generated(path) for path in path_list] # pylint: disable=protected-access - - -def get_deleted_path_properties_from_generated_code(generated): - deleted_path = DeletedPathProperties() - deleted_path.name = generated.name - deleted_path.deleted_time = generated.properties.deleted_time - deleted_path.remaining_retention_days = generated.properties.remaining_retention_days - deleted_path.deletion_id = generated.deletion_id - return deleted_path - - -def is_file_path(_, __, headers): - if headers['x-ms-resource-type'] == "file": - return True - return False - - -def get_datalake_service_properties(datalake_properties): - datalake_properties["analytics_logging"] = AnalyticsLogging._from_generated( # pylint: disable=protected-access - datalake_properties["analytics_logging"]) - datalake_properties["hour_metrics"] = Metrics._from_generated(datalake_properties["hour_metrics"]) # pylint: disable=protected-access - datalake_properties["minute_metrics"] = Metrics._from_generated( # pylint: disable=protected-access - datalake_properties["minute_metrics"]) - datalake_properties["delete_retention_policy"] = RetentionPolicy._from_generated( # pylint: disable=protected-access - datalake_properties["delete_retention_policy"]) - datalake_properties["static_website"] = StaticWebsite._from_generated( # pylint: disable=protected-access - datalake_properties["static_website"]) - return datalake_properties - - -def from_blob_properties(blob_properties): - file_props = FileProperties() - file_props.name = blob_properties.name - file_props.etag = blob_properties.etag - file_props.deleted = blob_properties.deleted - file_props.metadata = blob_properties.metadata - file_props.lease = blob_properties.lease - file_props.lease.__class__ = LeaseProperties - file_props.last_modified = blob_properties.last_modified - file_props.creation_time = blob_properties.creation_time - file_props.size = blob_properties.size - file_props.deleted_time = blob_properties.deleted_time - file_props.remaining_retention_days = blob_properties.remaining_retention_days - 
file_props.content_settings = blob_properties.content_settings - return file_props - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = value - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - try: - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - except AttributeError: - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. - if isinstance(storage_error, (ResourceNotFoundError, ClientAuthenticationError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error.
- if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.invalid_property_name, - StorageErrorCode.invalid_source_uri, - StorageErrorCode.source_path_not_found, - StorageErrorCode.lease_name_mismatch, - StorageErrorCode.file_system_not_found, - StorageErrorCode.path_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.invalid_destination_path, - StorageErrorCode.invalid_rename_source_path, - StorageErrorCode.lease_is_already_broken, - StorageErrorCode.invalid_source_or_destination_resource_type, - StorageErrorCode.rename_destination_parent_path_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.source_path_is_being_deleted, - StorageErrorCode.path_already_exists, - StorageErrorCode.destination_path_is_being_deleted, - StorageErrorCode.file_system_already_exists, - StorageErrorCode.file_system_being_deleted, - StorageErrorCode.path_conflict]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_download.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_download.py deleted file mode 100644 index 61716d3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_download.py +++ /dev/null @@ -1,59 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Iterator - -from ._deserialize import from_blob_properties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. 
If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - """ - return self._downloader.chunks() - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return self._downloader.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_file_system_client.py deleted file mode 100644 index 3be2ac3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_file_system_client.py +++ /dev/null @@ -1,922 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import functools -from typing import Optional, Any, Union - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore -import six - -from azure.core.pipeline import Pipeline -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged -from azure.multiapi.storagev2.blob.v2020_06_12 import ContainerClient -from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str -from ._serialize import convert_dfs_url_to_blob_url, get_api_version -from ._list_paths_helper import DeletedPathPropertiesPaged -from ._models import LocationMode, FileSystemProperties, PublicAccess, DeletedPathProperties, FileProperties, \ - DirectoryProperties -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_lease import DataLakeLeaseClient -from ._generated import AzureDataLakeStorageRESTAPI -from ._generated.models import ListBlobsIncludeItem -from ._deserialize import deserialize_path_properties, process_storage_error, is_file_path - - -class FileSystemClient(StorageAccountHostsMixin): - """A client to interact with a specific file system, even if that file system - may not yet exist. - - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. 
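``StorageStreamDownloader`` above is a thin wrapper over the blob downloader that exposes ``chunks()``, ``readall()`` and ``readinto()``. A hedged usage sketch, assuming the downloader comes from ``DataLakeFileClient.download_file()`` (not part of this excerpt) and that the connection string and path are placeholders::

    from azure.multiapi.storagev2.filedatalake.v2020_06_12 import FileSystemClient

    fs_client = FileSystemClient.from_connection_string("<connection-string>", "myfilesystem")
    downloader = fs_client.get_file_client("folder/data.csv").download_file()
    print(downloader.name, downloader.size)       # file name and total size in bytes
    with open("data.csv", "wb") as local_file:
        downloader.readinto(local_file)           # stream into any writable, seekable stream
    # alternatively: data = downloader.readall(), or iterate over downloader.chunks()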
- - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not file_system_name: - raise ValueError("Please specify a file system name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - # TODO: add self.account_url to base_client and remove _blob_account_url - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._container_client = ContainerClient(blob_account_url, file_system_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url, - file_system=file_system_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return 
"{}://{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - self._query_str) - - def __exit__(self, *args): - self._container_client.close() - super(FileSystemClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._container_client.close() - self.__exit__() - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> FileSystemClient - """ - Create FileSystemClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a FileSystemClient - :rtype ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_connection_string] - :end-before: [END create_file_system_client_from_connection_string] - :language: python - :dedent: 8 - :caption: Create FileSystemClient from connection string - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, credential=credential, **kwargs) - - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 12 - :caption: Creating a file system in the datalake service. - """ - return self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file system exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._container_client.exists(**kwargs) - - def _rename_file_system(self, new_name, **kwargs): - # type: (str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
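``from_connection_string``, ``exists``, ``create_file_system`` and ``acquire_lease`` above mirror their ``azure.storage.filedatalake`` counterparts. A minimal sketch, assuming the client is re-exported from the versioned package root and that ``DataLakeLeaseClient`` offers a ``release()`` method as in the upstream library; the connection string is a placeholder::

    from azure.multiapi.storagev2.filedatalake.v2020_06_12 import FileSystemClient

    fs_client = FileSystemClient.from_connection_string("<connection-string>", "myfilesystem")
    if not fs_client.exists():
        fs_client.create_file_system(metadata={"Category": "test"})
    lease = fs_client.acquire_lease(lease_duration=15)   # finite leases run 15-60 seconds
    # ... perform work while the file system lease is held ...
    lease.release()                                       # assumed DataLakeLeaseClient method
    fs_client.close()                                     # also closes the wrapped ContainerClient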
- :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access - #TODO: self._raw_credential would not work with SAS tokens - renamed_file_system = FileSystemClient( - "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - return renamed_file_system - - def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 12 - :caption: Deleting a file system in the datalake service. - """ - self._container_client.delete_container(**kwargs) - - def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the file system. - """ - container_properties = self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the file system. - """ - return self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. 
- :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File System-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, the operation only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_policy = self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> ItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. 
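The access-policy methods above delegate to the wrapped container client. A hedged sketch, assuming ``AccessPolicy`` and ``PublicAccess`` are exported from the versioned package root as they are from ``azure.storage.filedatalake``; the connection string and policy name are placeholders::

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.filedatalake.v2020_06_12 import (
        AccessPolicy, FileSystemClient, PublicAccess)

    fs_client = FileSystemClient.from_connection_string("<connection-string>", "myfilesystem")
    policy = AccessPolicy(permission="rl",                       # read + list
                          expiry=datetime.utcnow() + timedelta(hours=1))
    fs_client.set_file_system_access_policy({"read-list": policy},
                                            public_access=PublicAccess.File)
    acl = fs_client.get_file_system_access_policy()
    print(acl["public_access"], acl["signed_identifiers"])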
Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 8 - :caption: List the paths in the file system. - """ - timeout = kwargs.pop('timeout', None) - return self._client.file_system.list_paths( - recursive=recursive, - max_results=max_results, - path=path, - timeout=timeout, - cls=deserialize_path_properties, - **kwargs) - - def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
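``get_paths`` above returns a lazily paged iterator of ``PathProperties``. A minimal sketch, assuming ``PathProperties`` exposes at least ``name`` and ``is_directory`` as in ``azure.storage.filedatalake``; connection string and path filter are placeholders::

    from azure.multiapi.storagev2.filedatalake.v2020_06_12 import FileSystemClient

    fs_client = FileSystemClient.from_connection_string("<connection-string>", "myfilesystem")
    for path in fs_client.get_paths(path="raw/2024", recursive=True, upn=True):
        # upn=True asks the service to return user principal names instead of object IDs
        print(path.name, path.is_directory)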
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.delete_directory(**kwargs) - return directory_client - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. 
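``create_directory`` and ``delete_directory`` above are thin conveniences that build a ``DataLakeDirectoryClient`` and forward the call. A minimal sketch with placeholder names::

    from azure.multiapi.storagev2.filedatalake.v2020_06_12 import FileSystemClient

    fs_client = FileSystemClient.from_connection_string("<connection-string>", "myfilesystem")
    dir_client = fs_client.create_directory("archive/2024", metadata={"Category": "test"})
    # the returned DataLakeDirectoryClient can be reused for per-directory operations
    fs_client.delete_directory("archive/2024")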
- :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 8 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 8 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - file_client.delete_file(**kwargs) - return file_client - - def _undelete_path_options(self, deleted_path_name, deletion_id): - quoted_path = quote(unquote(deleted_path_name.strip('/'))) - - url_and_token = self.url.replace('.dfs.', '.blob.').split('?') - try: - url = url_and_token[0] + '/' + quoted_path + url_and_token[1] - except IndexError: - url = url_and_token[0] + '/' + quoted_path - - undelete_source = quoted_path + '?deletionid={}'.format(deletion_id) if deletion_id else None - - return quoted_path, url, undelete_source - - def _undelete_path(self, deleted_path_name, deletion_id, **kwargs): - # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient] - """Restores soft-deleted path. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :param str deleted_path_name: - Specifies the path (file or directory) to restore. - :param str deletion_id: - Specifies the version of the deleted path to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.file.datalake.DataLakeDirectoryClient or azure.storage.file.datalake.DataLakeFileClient - """ - _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id) - - pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - path_client = AzureDataLakeStorageRESTAPI( - url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline) - try: - is_file = path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs) - if is_file: - return self.get_file_client(deleted_path_name) - return self.get_directory_client(deleted_path_name) - except HttpResponseError as error: - process_storage_error(error) - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. 
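``create_file`` and ``delete_file`` above follow the same pattern via ``DataLakeFileClient``. A minimal sketch; the ``permissions``/``umask`` values follow the 4-digit octal notation described in the docstring and only apply on Hierarchical Namespace accounts, and the names are placeholders::

    from azure.multiapi.storagev2.filedatalake.v2020_06_12 import FileSystemClient

    fs_client = FileSystemClient.from_connection_string("<connection-string>", "myfilesystem")
    file_client = fs_client.create_file("archive/2024/report.csv",
                                        permissions="0640", umask="0027")
    # ... upload/append data through file_client, then clean up:
    fs_client.delete_file("archive/2024/report.csv")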
- - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.get('name') - except AttributeError: - directory_name = str(directory) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file_path.get('name') - except AttributeError: - file_path = str(file_path) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def list_deleted_paths(self, **kwargs): - # type: (Any) -> ItemPaged[DeletedPathProperties] - """Returns a generator to list the deleted (file or directory) paths under the specified file system. 
- The generator will lazily follow the continuation tokens returned by - the service. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword str path_prefix: - Filters the results to return only paths under the specified path. - :keyword int results_per_page: - An optional value that specifies the maximum number of items to return per page. - If omitted or greater than 5,000, the response will include up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of DeletedPathProperties. - :rtype: - ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.DeletedPathProperties] - """ - path_prefix = kwargs.pop('path_prefix', None) - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment, - showonly=ListBlobsIncludeItem.deleted, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged, - results_per_page=results_per_page, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/__init__.py deleted file mode 100644 index 5cd3ae2..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI -__all__ = ['AzureDataLakeStorageRESTAPI'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py deleted file mode 100644 index fbd0a79..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
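``list_deleted_paths`` above pages through soft-deleted entries via the blob-endpoint hierarchy listing. A hedged sketch, assuming ``DeletedPathProperties`` exposes ``name`` and ``deletion_id`` (the identifier the private undelete helper expects); it requires a delete retention policy and API version 2020-06-12 or later, and the names are placeholders::

    from azure.multiapi.storagev2.filedatalake.v2020_06_12 import FileSystemClient

    fs_client = FileSystemClient.from_connection_string("<connection-string>", "myfilesystem")
    for deleted in fs_client.list_deleted_paths(path_prefix="archive/", results_per_page=100):
        print(deleted.name, deleted.deletion_id)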
-# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from ._configuration import AzureDataLakeStorageRESTAPIConfiguration -from .operations import ServiceOperations -from .operations import FileSystemOperations -from .operations import PathOperations -from . import models - - -class AzureDataLakeStorageRESTAPI(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.filedatalake.operations.ServiceOperations - :ivar file_system: FileSystemOperations operations - :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations - :ivar path: PathOperations operations - :vartype path: azure.storage.filedatalake.operations.PathOperations - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - base_url = '{url}' - self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( - self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( - self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, http_request, **kwargs): - # type: (HttpRequest, Any) -> HttpResponse - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.pipeline.transport.HttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureDataLakeStorageRESTAPI - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_configuration.py deleted file mode 100644 index 3bfff36..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_configuration.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureDataLakeStorageRESTAPIConfiguration(Configuration): - """Configuration for AzureDataLakeStorageRESTAPI. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs) - - self.url = url - self.resource = "filesystem" - self.version = "2020-06-12" - kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/__init__.py deleted file mode 100644 index 24daed3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI -__all__ = ['AzureDataLakeStorageRESTAPI'] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py deleted file mode 100644 index efeeeb3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py +++ /dev/null @@ -1,81 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest -from msrest import Deserializer, Serializer - -from ._configuration import AzureDataLakeStorageRESTAPIConfiguration -from .operations import ServiceOperations -from .operations import FileSystemOperations -from .operations import PathOperations -from .. import models - - -class AzureDataLakeStorageRESTAPI(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations - :ivar file_system: FileSystemOperations operations - :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations - :ivar path: PathOperations operations - :vartype path: azure.storage.filedatalake.aio.operations.PathOperations - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( - self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureDataLakeStorageRESTAPI": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_configuration.py deleted file mode 100644 index 8223472..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_configuration.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureDataLakeStorageRESTAPIConfiguration(Configuration): - """Configuration for AzureDataLakeStorageRESTAPI. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the targe of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs) - - self.url = url - self.resource = "filesystem" - self.version = "2020-06-12" - kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/__init__.py deleted file mode 100644 index 0db71e0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py deleted file mode 100644 index d4e206a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py +++ /dev/null @@ -1,631 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class FileSystemOperations: - """FileSystemOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - properties: Optional[str] = None, - **kwargs - ) -> None: - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the - operation fails. This operation does not support conditional HTTP requests. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. 
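As a side note on the ``x-ms-properties`` format described in the docstring above (clear-text names, base64-encoded values, pairs joined by commas), a small illustrative helper; the helper name and the sample values are invented for the example::

    import base64

    def encode_properties(props: dict) -> str:
        """Render {'n1': 'v1', ...} as 'n1=<base64(v1)>, n2=<base64(v2)>'."""
        pairs = []
        for name, value in props.items():
            # Values are limited to the ISO-8859-1 character set per the docstring.
            encoded = base64.b64encode(value.encode("iso-8859-1")).decode("ascii")
            pairs.append(f"{name}={encoded}")
        return ", ".join(pairs)

    # 'hello' -> 'aGVsbG8=', 'world' -> 'd29ybGQ='
    print(encode_properties({"project": "hello", "owner": "world"}))
    # project=aGVsbG8=, owner=d29ybGQ=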
- :type properties: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}'} # type: ignore - - async def set_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - properties: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional HTTP requests. For - more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - async def get_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs - ) -> None: - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the response headers. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - async def delete( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same - identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, - attempts to create a filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information indicating that the - filesystem is being deleted. All other operations, including operations on any files or - directories within the filesystem, will fail with status code 404 (Not Found) while the - filesystem is being deleted. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}'} # type: ignore - - def list_paths( - self, - recursive: bool, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - continuation: Optional[str] = None, - path: Optional[str] = None, - max_results: Optional[int] = None, - upn: Optional[bool] = None, - **kwargs - ) -> AsyncIterable["_models.PathList"]: - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required. 
- :type recursive: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param path: Optional. Filters results to paths within the specified directory. An error - occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either PathList or the result of cls(response) - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.PathList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # TODO: change this once continuation/next_link autorest PR is merged - def prepare_request(next_link=None, cont_token=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", - request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, - 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_paths.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, - 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - # TODO: change this once continuation/next_link autorest PR is merged - if cont_token is not 
None: - query_parameters['continuation'] = self._serialize.query("continuation", cont_token, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - async def extract_data(pipeline_response): - # TODO: change this once continuation/next_link autorest PR is merged - try: - cont_token = pipeline_response.http_response.headers['x-ms-continuation'] - except KeyError: - cont_token = None - deserialized = self._deserialize('PathList', pipeline_response) - list_of_elem = deserialized.paths - if cls: - list_of_elem = cls(list_of_elem) - return cont_token, AsyncList(list_of_elem) - - # TODO: change this once continuation/next_link autorest PR is merged - async def get_next(cont_token=None): - cont_token = cont_token if not continuation else continuation - request = prepare_request(cont_token=cont_token) - - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged( - get_next, extract_data - ) - list_paths.metadata = {'url': '/{filesystem}'} # type: ignore - - async def list_blob_hierarchy_segment( - self, - prefix: Optional[str] = None, - delimiter: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - showonly: Optional[str] = "deleted", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.ListBlobsHierarchySegmentResponse": - """The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. 
The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem] - :param showonly: Include this parameter to specify one or more datasets to include in the - response. - :type showonly: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if delimiter is not None: - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if showonly is not None: - query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = 
await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py deleted file mode 100644 index 7cec589..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py +++ /dev/null @@ -1,1773 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class PathOperations: - """PathOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - resource: Optional[Union[str, "_models.PathResourceType"]] = None, - continuation: Optional[str] = None, - mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - rename_source: Optional[str] = None, - source_lease_id: Optional[str] = None, - properties: Optional[str] = None, - permissions: Optional[str] = None, - umask: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is overwritten and if the - destination already exists and has a lease the lease is broken. This operation supports - conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob - Service Operations `_. To fail if the destination already exists, - use a conditional request with If-None-Match: "*". - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param resource: Required only for Create File and Create Directory. The value must be "file" - or "directory". - :type resource: str or ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This parameter determines the - behavior of the rename operation. The value must be "legacy" or "posix", and the default value - will be "posix". - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. The value must have the - following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties - will overwrite the existing properties; otherwise, the existing properties will be preserved. - This value must be a URL percent-encoded string. Note that the string may only contain ASCII - characters in the ISO-8859-1 character set. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param properties: Optional. 
User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, the umask - restricts the permissions of the file or directory to be created. The resulting permission is - given by p bitwise and not u, where p is the permission and u is the umask. For example, if p - is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 - for a directory and 0666 for a file. The default umask is 0027. The umask must be specified - in 4-digit octal notation (e.g. 0766). - :type umask: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
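The umask rule quoted above ("p bitwise and not u") is ordinary octal arithmetic; a short check of the docstring's own worked example::

    p, u = 0o777, 0o057      # requested permission and umask from the example above
    result = p & ~u & 0o777  # keep only the nine permission bits
    assert oct(result) == "0o720"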
- :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_encoding = None - _content_language = None - _content_disposition = None - _content_type = None - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - _content_disposition = path_http_headers.content_disposition - _content_type = path_http_headers.content_type - if source_modified_access_conditions is not None: - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if resource is not None: - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = 
self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', 
response.headers.get('x-ms-continuation')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def update( - self, - action: Union[str, "_models.PathUpdateAction"], - mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], - body: IO, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - max_records: Optional[int] = None, - continuation: Optional[str] = None, - force_flag: Optional[bool] = None, - position: Optional[int] = None, - retain_uncommitted_data: Optional[bool] = None, - close: Optional[bool] = None, - content_length: Optional[int] = None, - properties: Optional[str] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - acl: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> Optional["_models.SetAccessControlRecursiveResponse"]: - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, - sets properties for a file or directory, or sets access control for a file or directory. Data - can only be appended to a file. Concurrent writes to the same file using multiple clients are - not supported. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations `_. - - :param action: The action must be "append" to upload data to be appended to a file, "flush" to - flush previously uploaded data to a file, "setProperties" to set the properties of a file or - directory, "setAccessControl" to set the owner, group, permissions, or access control list for - a file or directory, or "setAccessControlRecursive" to set the access control list for a - directory recursively. Note that Hierarchical Namespace must be enabled for the account in - order to use access control. Also note that the Access Control List (ACL) includes permissions - for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers - are mutually exclusive. - :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param body: Initial data. - :type body: IO - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the - maximum number of files or directories on which the acl change will be applied. 
If omitted or - greater than 2,000, the request will process up to 2,000 items. - :type max_records: int - :param continuation: Optional. The number of paths processed with each invocation is limited. - If the number of paths to be processed exceeds this limit, a continuation token is returned in - the response header x-ms-continuation. When a continuation token is returned in the response, - it must be percent-encoded and specified in a subsequent invocation of setAccessControlRecursive - operation. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed. - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string.
Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/json" - - # Construct URL - url = self.update.metadata['url'] # type: ignore - path_format_arguments = { - 'url': 
self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", 
_if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - deserialized = None - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if response.status_code == 202: - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def lease( - self, - x_ms_lease_action: Union[str, 
"_models.PathLeaseAction"], - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - x_ms_lease_duration: Optional[int] = None, - x_ms_lease_break_period: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Lease Path. - - Create and manage a lease to restrict write and delete access to the path. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations `_. - - :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", - and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" - to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the - lease break period is allowed to elapse, during which time no lease operation except break and - release can be performed on the file. When a lease is successfully broken, the response - indicates the interval in seconds until a new lease can be acquired. Use "change" and specify - the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to - change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. - :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies - the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or - -1 for infinite lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, - and specifies the break period of the lease in seconds. The lease break duration must be - between 0 and 60 seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 201: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 202: - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def read( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - x_ms_range_get_content_md5: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> IO: - """Read File. - - Read the contents of a file. For read operations, range requests are supported. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: The HTTP Range request header specifies one or more byte ranges of the resource - to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified - together with the Range header, the service returns the MD5 hash for the range, as long as the - range is less than or equal to 4MB in size. 
If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this header is set to true when - the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.read.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
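                # Partial Content (206) responses from ranged reads carry the same metadata as a
                # full read: the remaining standard and x-ms-* headers are deserialized below and
                # the ranged body is again exposed as a download stream, mirroring the 200 branch.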
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - action: Optional[Union[str, "_models.PathGetPropertiesAction"]] = None, - upn: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a path. Get Status returns - all system defined properties for a path. Get Access Control List returns the access control - list for a path. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param action: Optional. If the value is "getStatus" only the system defined properties for the - path are returned. If the value is "getAccessControl" the access control list is returned in - the response headers (Hierarchical Namespace must be enabled for the account), otherwise the - properties are returned. - :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'str') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - 
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def delete( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - recursive: Optional[bool] = None, - continuation: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param recursive: Required. - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. 
When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/json" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control_recursive( - self, - mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], - timeout: Optional[int] = None, - continuation: Optional[str] = None, - force_flag: Optional[bool] = None, - max_records: Optional[int] = None, - acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> "_models.SetAccessControlRecursiveResponse": - """Set the access control list for a path and subpaths. - - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files or directories on which - the acl change will be applied. If omitted or greater than 2,000, the request will process up - to 2,000 items. - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - action = "setAccessControlRecursive" - accept = "application/json" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def flush_data( - self, - timeout: Optional[int] = None, - 
position: Optional[int] = None, - retain_uncommitted_data: Optional[bool] = None, - close: Optional[bool] = None, - content_length: Optional[int] = None, - request_id_parameter: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs - ) -> None: - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - action = "flush" - accept = "application/json" - - # Construct URL - url = self.flush_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", 
_content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def append_data( - self, - body: IO, - position: Optional[int] = None, - timeout: Optional[int] = None, - content_length: Optional[int] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Append data to the file. - - :param body: Initial data. - :type body: IO - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. 
To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _transactional_content_hash = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _transactional_content_hash = path_http_headers.transactional_content_hash - action = "append" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.append_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_expiry( - self, - expiry_options: Union[str, "_models.PathExpiryOptions"], - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - expires_on: Optional[str] = None, - **kwargs - ) -> None: - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/json" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def undelete( - self, - timeout: Optional[int] = None, - undelete_source: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """Undelete a path that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of - the soft deleted blob to undelete. 
- :type undelete_source: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/json" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if undelete_source is not None: - header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py deleted file mode 100644 index f8ae878..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,148 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar -import warnings - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def list_file_systems( - self, - prefix: Optional[str] = None, - continuation: Optional[str] = None, - max_results: Optional[int] = None, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs - ) -> AsyncIterable["_models.FileSystemList"]: - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either FileSystemList or the result of cls(response) - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystemList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - resource = "account" - accept = "application/json" - - def prepare_request(next_link=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_file_systems.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - async def extract_data(pipeline_response): - deserialized = self._deserialize('FileSystemList', pipeline_response) - list_of_elem = deserialized.filesystems - if cls: - list_of_elem = cls(list_of_elem) - return None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged( - get_next, extract_data - ) - list_file_systems.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/__init__.py deleted file mode 100644 index fc4548f..0000000 
--- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AclFailedEntry - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import FileSystem - from ._models_py3 import FileSystemList - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import Path - from ._models_py3 import PathHTTPHeaders - from ._models_py3 import PathList - from ._models_py3 import SetAccessControlRecursiveResponse - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError - from ._models_py3 import StorageErrorError -except (SyntaxError, ImportError): - from ._models import AclFailedEntry # type: ignore - from ._models import BlobHierarchyListSegment # type: ignore - from ._models import BlobItemInternal # type: ignore - from ._models import BlobPrefix # type: ignore - from ._models import BlobPropertiesInternal # type: ignore - from ._models import FileSystem # type: ignore - from ._models import FileSystemList # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListBlobsHierarchySegmentResponse # type: ignore - from ._models import ModifiedAccessConditions # type: ignore - from ._models import Path # type: ignore - from ._models import PathHTTPHeaders # type: ignore - from ._models import PathList # type: ignore - from ._models import SetAccessControlRecursiveResponse # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageErrorError # type: ignore - -from ._azure_data_lake_storage_restapi_enums import ( - ListBlobsIncludeItem, - PathExpiryOptions, - PathGetPropertiesAction, - PathLeaseAction, - PathRenameMode, - PathResourceType, - PathSetAccessControlRecursiveMode, - PathUpdateAction, -) - -__all__ = [ - 'AclFailedEntry', - 'BlobHierarchyListSegment', - 'BlobItemInternal', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'FileSystem', - 'FileSystemList', - 'LeaseAccessConditions', - 'ListBlobsHierarchySegmentResponse', - 'ModifiedAccessConditions', - 'Path', - 'PathHTTPHeaders', - 'PathList', - 'SetAccessControlRecursiveResponse', - 'SourceModifiedAccessConditions', - 'StorageError', - 'StorageErrorError', - 'ListBlobsIncludeItem', - 'PathExpiryOptions', - 'PathGetPropertiesAction', - 'PathLeaseAction', - 'PathRenameMode', - 'PathResourceType', - 'PathSetAccessControlRecursiveMode', - 'PathUpdateAction', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py deleted file mode 100644 index 804050e..0000000 --- 
a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py +++ /dev/null @@ -1,81 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COPY = "copy" - DELETED = "deleted" - METADATA = "metadata" - SNAPSHOTS = "snapshots" - UNCOMMITTEDBLOBS = "uncommittedblobs" - VERSIONS = "versions" - TAGS = "tags" - -class PathExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NEVER_EXPIRE = "NeverExpire" - RELATIVE_TO_CREATION = "RelativeToCreation" - RELATIVE_TO_NOW = "RelativeToNow" - ABSOLUTE = "Absolute" - -class PathGetPropertiesAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - GET_ACCESS_CONTROL = "getAccessControl" - GET_STATUS = "getStatus" - -class PathLeaseAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - ACQUIRE = "acquire" - BREAK_ENUM = "break" - CHANGE = "change" - RENEW = "renew" - RELEASE = "release" - -class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LEGACY = "legacy" - POSIX = "posix" - -class PathResourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - DIRECTORY = "directory" - FILE = "file" - -class PathSetAccessControlRecursiveMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SET = "set" - MODIFY = "modify" - REMOVE = "remove" - -class PathUpdateAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - APPEND = "append" - FLUSH = "flush" - SET_PROPERTIES = "setProperties" - SET_ACCESS_CONTROL = "setAccessControl" - SET_ACCESS_CONTROL_RECURSIVE = "setAccessControlRecursive" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models.py deleted file mode 100644 index 237617a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models.py +++ /dev/null @@ -1,672 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AclFailedEntry(msrest.serialization.Model): - """AclFailedEntry. - - :param name: - :type name: str - :param type: - :type type: str - :param error_message: - :type error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AclFailedEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) - self.error_message = kwargs.get('error_message', None) - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs['blob_items'] - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. - :type properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal - :param deletion_id: - :type deletion_id: str - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'deletion_id': {'key': 'DeletionId', 'type': 'str'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs['deleted'] - self.snapshot = kwargs['snapshot'] - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs['properties'] - self.deletion_id = kwargs.get('deletion_id', None) - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. 
- :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs['name'] - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param copy_id: - :type copy_id: str - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier_inferred: - :type access_tier_inferred: bool - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. 
- :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - :param delete_time: - :type delete_time: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - 
self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.last_accessed_on = kwargs.get('last_accessed_on', None) - self.delete_time = kwargs.get('delete_time', None) - - -class FileSystem(msrest.serialization.Model): - """FileSystem. - - :param name: - :type name: str - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(FileSystem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - - -class FileSystemList(msrest.serialization.Model): - """FileSystemList. - - :param filesystems: - :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__( - self, - **kwargs - ): - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = kwargs.get('filesystems', None) - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - - -class Path(msrest.serialization.Model): - """Path. 
- - :param name: - :type name: str - :param is_directory: - :type is_directory: bool - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - :param content_length: - :type content_length: long - :param owner: - :type owner: str - :param group: - :type group: str - :param permissions: - :type permissions: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Path, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.is_directory = kwargs.get('is_directory', False) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - self.content_length = kwargs.get('content_length', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - - -class PathHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :type cache_control: str - :param content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type content_encoding: str - :param content_language: Optional. Set the blob's content language. If specified, this property - is stored with the blob and returned with a read request. - :type content_language: str - :param content_disposition: Optional. Sets the blob's Content-Disposition header. - :type content_disposition: str - :param content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :type content_type: str - :param content_md5: Specify the transactional md5 for the body, to be validated by the service. - :type content_md5: bytearray - :param transactional_content_hash: Specify the transactional md5 for the body, to be validated - by the service. 
- :type transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, - } - - def __init__( - self, - **kwargs - ): - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.content_type = kwargs.get('content_type', None) - self.content_md5 = kwargs.get('content_md5', None) - self.transactional_content_hash = kwargs.get('transactional_content_hash', None) - - -class PathList(msrest.serialization.Model): - """PathList. - - :param paths: - :type paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__( - self, - **kwargs - ): - super(PathList, self).__init__(**kwargs) - self.paths = kwargs.get('paths', None) - - -class SetAccessControlRecursiveResponse(msrest.serialization.Model): - """SetAccessControlRecursiveResponse. - - :param directories_successful: - :type directories_successful: int - :param files_successful: - :type files_successful: int - :param failure_count: - :type failure_count: int - :param failed_entries: - :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__( - self, - **kwargs - ): - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = kwargs.get('directories_successful', None) - self.files_successful = kwargs.get('files_successful', None) - self.failure_count = kwargs.get('failure_count', None) - self.failed_entries = kwargs.get('failed_entries', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. 
- :type source_if_unmodified_since: ~datetime.datetime - """ - - _attribute_map = { - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param error: The service error response object. - :type error: ~azure.storage.filedatalake.models.StorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorError'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class StorageErrorError(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models_py3.py deleted file mode 100644 index bbe361c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models_py3.py +++ /dev/null @@ -1,779 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import List, Optional - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AclFailedEntry(msrest.serialization.Model): - """AclFailedEntry. - - :param name: - :type name: str - :param type: - :type type: str - :param error_message: - :type error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - type: Optional[str] = None, - error_message: Optional[str] = None, - **kwargs - ): - super(AclFailedEntry, self).__init__(**kwargs) - self.name = name - self.type = type - self.error_message = error_message - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. 
- - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - blob_prefixes: Optional[List["BlobPrefix"]] = None, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. - :type properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal - :param deletion_id: - :type deletion_id: str - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'deletion_id': {'key': 'DeletionId', 'type': 'str'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - deleted: bool, - snapshot: str, - properties: "BlobPropertiesInternal", - version_id: Optional[str] = None, - is_current_version: Optional[bool] = None, - deletion_id: Optional[str] = None, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.deletion_id = deletion_id - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. 
- :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param copy_id: - :type copy_id: str - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier_inferred: - :type access_tier_inferred: bool - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - :param delete_time: - :type delete_time: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 
'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - creation_time: Optional[datetime.datetime] = None, - content_length: Optional[int] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_md5: Optional[bytearray] = None, - content_disposition: Optional[str] = None, - cache_control: Optional[str] = None, - blob_sequence_number: Optional[int] = None, - copy_id: Optional[str] = None, - copy_source: Optional[str] = None, - copy_progress: Optional[str] = None, - copy_completion_time: Optional[datetime.datetime] = None, - copy_status_description: Optional[str] = None, - server_encrypted: Optional[bool] = None, - incremental_copy: Optional[bool] = None, - destination_snapshot: Optional[str] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier_inferred: Optional[bool] = None, - customer_provided_key_sha256: Optional[str] = None, - encryption_scope: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - tag_count: Optional[int] = None, - expires_on: Optional[datetime.datetime] = None, - is_sealed: Optional[bool] = None, - last_accessed_on: Optional[datetime.datetime] = None, - delete_time: Optional[datetime.datetime] = None, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.copy_id = copy_id - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier_inferred = access_tier_inferred - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.last_accessed_on = last_accessed_on - self.delete_time = delete_time - - -class FileSystem(msrest.serialization.Model): - """FileSystem. 
- - :param name: - :type name: str - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - last_modified: Optional[str] = None, - e_tag: Optional[str] = None, - **kwargs - ): - super(FileSystem, self).__init__(**kwargs) - self.name = name - self.last_modified = last_modified - self.e_tag = e_tag - - -class FileSystemList(msrest.serialization.Model): - """FileSystemList. - - :param filesystems: - :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__( - self, - *, - filesystems: Optional[List["FileSystem"]] = None, - **kwargs - ): - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = filesystems - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. - :type segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobHierarchyListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - delimiter: Optional[str] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. 
- - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - } - - def __init__( - self, - *, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - - -class Path(msrest.serialization.Model): - """Path. - - :param name: - :type name: str - :param is_directory: - :type is_directory: bool - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - :param content_length: - :type content_length: long - :param owner: - :type owner: str - :param group: - :type group: str - :param permissions: - :type permissions: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - is_directory: Optional[bool] = False, - last_modified: Optional[str] = None, - e_tag: Optional[str] = None, - content_length: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - **kwargs - ): - super(Path, self).__init__(**kwargs) - self.name = name - self.is_directory = is_directory - self.last_modified = last_modified - self.e_tag = e_tag - self.content_length = content_length - self.owner = owner - self.group = group - self.permissions = permissions - - -class PathHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :type cache_control: str - :param content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type content_encoding: str - :param content_language: Optional. Set the blob's content language. If specified, this property - is stored with the blob and returned with a read request. - :type content_language: str - :param content_disposition: Optional. Sets the blob's Content-Disposition header. 
- :type content_disposition: str - :param content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :type content_type: str - :param content_md5: Specify the transactional md5 for the body, to be validated by the service. - :type content_md5: bytearray - :param transactional_content_hash: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, - } - - def __init__( - self, - *, - cache_control: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_disposition: Optional[str] = None, - content_type: Optional[str] = None, - content_md5: Optional[bytearray] = None, - transactional_content_hash: Optional[bytearray] = None, - **kwargs - ): - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.content_type = content_type - self.content_md5 = content_md5 - self.transactional_content_hash = transactional_content_hash - - -class PathList(msrest.serialization.Model): - """PathList. - - :param paths: - :type paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__( - self, - *, - paths: Optional[List["Path"]] = None, - **kwargs - ): - super(PathList, self).__init__(**kwargs) - self.paths = paths - - -class SetAccessControlRecursiveResponse(msrest.serialization.Model): - """SetAccessControlRecursiveResponse. - - :param directories_successful: - :type directories_successful: int - :param files_successful: - :type files_successful: int - :param failure_count: - :type failure_count: int - :param failed_entries: - :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__( - self, - *, - directories_successful: Optional[int] = None, - files_successful: Optional[int] = None, - failure_count: Optional[int] = None, - failed_entries: Optional[List["AclFailedEntry"]] = None, - **kwargs - ): - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - self.failed_entries = failed_entries - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. 
- :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - """ - - _attribute_map = { - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - } - - def __init__( - self, - *, - source_if_match: Optional[str] = None, - source_if_none_match: Optional[str] = None, - source_if_modified_since: Optional[datetime.datetime] = None, - source_if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param error: The service error response object. - :type error: ~azure.storage.filedatalake.models.StorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorError'}, - } - - def __init__( - self, - *, - error: Optional["StorageErrorError"] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.error = error - - -class StorageErrorError(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - code: Optional[str] = None, - message: Optional[str] = None, - **kwargs - ): - super(StorageErrorError, self).__init__(**kwargs) - self.code = code - self.message = message diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/__init__.py deleted file mode 100644 index 0db71e0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py deleted file mode 100644 index 991890a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py +++ /dev/null @@ -1,643 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class FileSystemOperations(object): - """FileSystemOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - properties=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the - operation fails. This operation does not support conditional HTTP requests. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. 
User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}'} # type: ignore - - def set_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - properties=None, # 
type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional HTTP requests. For - more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - def get_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the response headers. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - def delete( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same - identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, - attempts to create a filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information indicating that the - filesystem is being deleted. 
All other operations, including operations on any files or - directories within the filesystem, will fail with status code 404 (Not Found) while the - filesystem is being deleted. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', 
response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}'} # type: ignore - - def list_paths( - self, - recursive, # type: bool - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - continuation=None, # type: Optional[str] - path=None, # type: Optional[str] - max_results=None, # type: Optional[int] - upn=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> Iterable["_models.PathList"] - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required. - :type recursive: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param path: Optional. Filters results to paths within the specified directory. An error - occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. 
- :type upn: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either PathList or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.PathList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # TODO: change this once continuation/next_link autorest PR is merged - def prepare_request(next_link=None, cont_token=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", - request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, - 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_paths.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, - 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - # TODO: change this once continuation/next_link autorest PR is merged - if cont_token is not None: - query_parameters['continuation'] = self._serialize.query("continuation", cont_token, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - def extract_data(pipeline_response): - # TODO: change this once continuation/next_link autorest PR is merged - try: - cont_token = pipeline_response.http_response.headers['x-ms-continuation'] - except KeyError: - cont_token = None - deserialized = self._deserialize('PathList', pipeline_response) - list_of_elem = deserialized.paths - if cls: - list_of_elem = cls(list_of_elem) - # TODO: change this once continuation/next_link autorest PR is merged - return cont_token, iter(list_of_elem) - - # TODO: change this once continuation/next_link autorest PR is merged - def get_next(cont_token=None): - cont_token = cont_token if not continuation else continuation - request = prepare_request(cont_token=cont_token) - - pipeline_response = self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged( - get_next, extract_data - ) - - list_paths.metadata = {'url': '/{filesystem}'} # type: ignore - - def list_blob_hierarchy_segment( - self, - prefix=None, # type: Optional[str] - delimiter=None, # type: Optional[str] - marker=None, # type: Optional[str] - max_results=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - showonly="deleted", # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsHierarchySegmentResponse" - """The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem] - :param showonly: Include this parameter to specify one or more datasets to include in the - response. - :type showonly: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if delimiter is not None: - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if showonly is not None: - query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, 
response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_path_operations.py deleted file mode 100644 index 5517c96..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_path_operations.py +++ /dev/null @@ -1,1789 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class PathOperations(object): - """PathOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - resource=None, # type: Optional[Union[str, "_models.PathResourceType"]] - continuation=None, # type: Optional[str] - mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - rename_source=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - properties=None, # type: Optional[str] - permissions=None, # type: Optional[str] - umask=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is overwritten and if the - destination already exists and has a lease the lease is broken. This operation supports - conditional HTTP requests. 
For more information, see `Specifying Conditional Headers for Blob - Service Operations `_. To fail if the destination already exists, - use a conditional request with If-None-Match: "*". - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param resource: Required only for Create File and Create Directory. The value must be "file" - or "directory". - :type resource: str or ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This parameter determines the - behavior of the rename operation. The value must be "legacy" or "posix", and the default value - will be "posix". - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. The value must have the - following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties - will overwrite the existing properties; otherwise, the existing properties will be preserved. - This value must be a URL percent-encoded string. Note that the string may only contain ASCII - characters in the ISO-8859-1 character set. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, the umask - restricts the permissions of the file or directory to be created. The resulting permission is - given by p bitwise and not u, where p is the permission and u is the umask. 
For example, if p - is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 - for a directory and 0666 for a file. The default umask is 0027. The umask must be specified - in 4-digit octal notation (e.g. 0766). - :type umask: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_encoding = None - _content_language = None - _content_disposition = None - _content_type = None - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - _content_disposition = path_http_headers.content_disposition - _content_type = path_http_headers.content_type - if source_modified_access_conditions is not None: - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if resource is not None: - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - - # 
Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def update( - self, - action, # type: Union[str, "_models.PathUpdateAction"] - mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - body, # type: IO - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - max_records=None, # type: Optional[int] - continuation=None, # type: Optional[str] - force_flag=None, # type: Optional[bool] - position=None, # type: Optional[int] - retain_uncommitted_data=None, # type: Optional[bool] - close=None, # type: Optional[bool] - content_length=None, # type: Optional[int] - properties=None, # type: Optional[str] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.SetAccessControlRecursiveResponse"] - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, - sets properties for a file or directory, or sets access control for a file or directory. Data - can only be appended to a file. Concurrent writes to the same file using multiple clients are - not supported. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations `_. - - :param action: The action must be "append" to upload data to be appended to a file, "flush" to - flush previously uploaded data to a file, "setProperties" to set the properties of a file or - directory, "setAccessControl" to set the owner, group, permissions, or access control list for - a file or directory, or "setAccessControlRecursive" to set the access control list for a - directory recursively. Note that Hierarchical Namespace must be enabled for the account in - order to use access control. Also note that the Access Control List (ACL) includes permissions - for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers - are mutually exclusive. 
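# Quick arithmetic check of the umask behaviour described for create() above
# (pure illustration, not part of the generated client): the effective
# permission is the requested permission p with the umask bits u cleared.
p, u = 0o777, 0o057
assert oct(p & ~u & 0o777) == "0o720"  # matches the 0720 example in the docstring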
- :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction
- :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
- modifies one or more POSIX access control rights that pre-exist on files and directories,
- "remove" removes one or more POSIX access control rights that were present earlier on files
- and directories.
- :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
- :param body: Initial data.
- :type body: IO
- :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
- limit that is recorded in the analytics logs when storage analytics logging is enabled.
- :type request_id_parameter: str
- :param timeout: The timeout parameter is expressed in seconds. For more information, see
- :code:`Setting Timeouts for Blob Service Operations.`.
- :type timeout: int
- :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the
- maximum number of files or directories on which the acl change will be applied. If omitted or
- greater than 2,000, the request will process up to 2,000 items.
- :type max_records: int
- :param continuation: Optional. The number of paths processed with each invocation is limited.
- If the number of paths to be processed exceeds this limit, a continuation token is returned in
- the response header x-ms-continuation. When a continuation token is returned in the response,
- it must be percent-encoded and specified in a subsequent invocation of the
- setAccessControlRecursive operation.
- :type continuation: str
- :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
- the operation will terminate quickly on encountering user errors (4XX). If true, the operation
- will ignore user errors and proceed with the operation on other sub-entities of the directory.
- Continuation token will only be returned when forceFlag is true in case of user errors. If not
- set, the default value is false.
- :type force_flag: bool
- :param position: This parameter allows the caller to upload data in parallel and control the
- order in which it is appended to the file. It is required when uploading data to be appended
- to the file and when flushing previously uploaded data to the file. The value must be the
- position where the data is to be appended. Uploaded data is not immediately flushed, or
- written, to the file. To flush, the previously uploaded data must be contiguous, the position
- parameter must be specified and equal to the length of the file after all data has been
- written, and there must not be a request entity body included with the request.
- :type position: long
- :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data
- is retained after the flush operation completes; otherwise, the uncommitted data is deleted
- after the flush operation. The default is false. Data at offsets less than the specified
- position are written to the file when flush succeeds, but this optional parameter allows data
- after the flush position to be retained for a future flush operation.
- :type retain_uncommitted_data: bool
- :param close: Azure Storage Events allow applications to receive notifications when files
- change. When Azure Storage Events are enabled, a file changed event is raised.
This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
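# Illustrative bookkeeping for the "append"/"flush" actions described above
# (not part of the generated client): each append passes the current length of
# the file as `position`, and the final flush passes the total length with a
# zero content_length and no request body.
chunks = [b"hello ", b"data ", b"lake"]
position = 0
append_requests = []
for chunk in chunks:
    append_requests.append(
        {"action": "append", "position": position, "content_length": len(chunk)}
    )
    position += len(chunk)
flush_request = {"action": "flush", "position": position, "content_length": 0}
assert flush_request["position"] == sum(len(c) for c in chunks)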
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/json" - - # Construct URL - url = self.update.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_length is not None: - 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - deserialized = None - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', 
response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if response.status_code == 202: - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def lease( - self, - x_ms_lease_action, # type: Union[str, "_models.PathLeaseAction"] - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - x_ms_lease_duration=None, # type: Optional[int] - x_ms_lease_break_period=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Lease Path. - - Create and manage a lease to restrict write and delete access to the path. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations `_. - - :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", - and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" - to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the - lease break period is allowed to elapse, during which time no lease operation except break and - release can be performed on the file. When a lease is successfully broken, the response - indicates the interval in seconds until a new lease can be acquired. Use "change" and specify - the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to - change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. 
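# Illustrative acquire/release sequence for the lease actions described above
# (not part of the generated client). Assumes `path_ops` is an already
# constructed instance of this operations class and `_models` is this package's
# generated models module (the LeaseAccessConditions name comes from the :type
# annotations in this docstring).
import uuid

proposed = str(uuid.uuid4())
# "acquire" needs a proposed lease id and a duration; -1 means an infinite lease.
path_ops.lease("acquire", proposed_lease_id=proposed, x_ms_lease_duration=-1)
# "release" passes the held lease id through the LeaseAccessConditions group,
# which is serialized as the x-ms-lease-id request header.
path_ops.lease(
    "release",
    lease_access_conditions=_models.LeaseAccessConditions(lease_id=proposed),
)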
- :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies - the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or - -1 for infinite lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, - and specifies the break period of the lease in seconds. The lease break duration must be - between 0 and 60 seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", 
x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 201: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 202: - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def read( - self, - 
request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - x_ms_range_get_content_md5=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """Read File. - - Read the contents of a file. For read operations, range requests are supported. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: The HTTP Range request header specifies one or more byte ranges of the resource - to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified - together with the Range header, the service returns the MD5 hash for the range, as long as the - range is less than or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this header is set to true when - the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
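# Illustrative range read (not part of the generated client). Assumes
# `path_ops` is an already constructed instance of this operations class. The
# operation returns a streamed download; iterating it yields the raw byte
# chunks of the requested range.
stream = path_ops.read(range="bytes=0-1023", x_ms_range_get_content_md5=True)
first_kib = b"".join(stream)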
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.read.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - 
response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - 
response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - action=None, # type: Optional[Union[str, "_models.PathGetPropertiesAction"]] - upn=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a path. Get Status returns - all system defined properties for a path. Get Access Control List returns the access control - list for a path. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param action: Optional. If the value is "getStatus" only the system defined properties for the - path are returned. If the value is "getAccessControl" the access control list is returned in - the response headers (Hierarchical Namespace must be enabled for the account), otherwise the - properties are returned. - :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
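# Illustrative call (not part of the generated client): get_properties issues a
# HEAD request, so everything of interest comes back as response headers; the
# documented cls callback is one way to capture them. Assumes `path_ops` is an
# already constructed instance of this operations class.
headers = path_ops.get_properties(
    action="getAccessControl",
    upn=True,
    cls=lambda pipeline_response, deserialized, response_headers: response_headers,
)
owner, acl = headers.get("x-ms-owner"), headers.get("x-ms-acl")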
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'str') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - 
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def delete( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - recursive=None, # type: Optional[bool] - continuation=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param recursive: Required. - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. 
If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
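# Illustrative helper (not part of the generated client) that assembles the
# comma-separated ACE list accepted by the x-ms-acl header, following the
# "[scope:][type]:[id]:[permissions]" format described above. The object id
# below is a made-up placeholder.
def build_acl(entries):
    # each entry: (scope, ace_type, qualifier, permissions); scope is "" for an
    # access ACE or "default" for a default ACE, and qualifier may be empty.
    aces = []
    for scope, ace_type, qualifier, perms in entries:
        prefix = scope + ":" if scope else ""
        aces.append("%s%s:%s:%s" % (prefix, ace_type, qualifier, perms))
    return ",".join(aces)

acl = build_acl([
    ("", "user", "", "rwx"),
    ("default", "group", "", "r-x"),
    ("", "user", "11111111-2222-3333-4444-555555555555", "r--"),
])
# -> "user::rwx,default:group::r-x,user:11111111-2222-3333-4444-555555555555:r--"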
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/json" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control_recursive( - self, - mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - timeout=None, # type: Optional[int] - continuation=None, # type: Optional[str] - force_flag=None, # type: Optional[bool] - max_records=None, # type: Optional[int] - acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.SetAccessControlRecursiveResponse" - """Set the access control list for a path and subpaths. - - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files or directories on which - the acl change will be applied. If omitted or greater than 2,000, the request will process up - to 2,000 items. - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. The value is a comma- - separated list of access control entries. Each access control entry (ACE) consists of a scope, - a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". 
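
A rough usage sketch of the two operations above (the ``path_ops`` instance is assumed for illustration, not part of the generated code); the ACL string follows the "[scope:][type]:[id]:[permissions]" format and ``permissions`` accepts symbolic or 4-digit octal notation:

    # Hypothetical PathOperations instance obtained from the generated client.
    path_ops.set_access_control(permissions="rwxr-x---")   # symbolic, or e.g. "0750"
    acl = "user::rwx,group::r-x,other::---"
    path_ops.set_access_control(acl=acl)
    # Apply the same ACL to the path and everything beneath it, at most
    # 2,000 entries per call; the response may carry a continuation token.
    result = path_ops.set_access_control_recursive(mode="modify", acl=acl, max_records=2000)
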
- :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - action = "setAccessControlRecursive" - accept = "application/json" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, 
response_headers) - - return deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def flush_data( - self, - timeout=None, # type: Optional[int] - position=None, # type: Optional[int] - retain_uncommitted_data=None, # type: Optional[bool] - close=None, # type: Optional[bool] - content_length=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. 
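
The position and content-length rules above amount to an append-then-flush pattern; a minimal sketch, with ``path_ops`` and the payload assumed and ``append_data`` defined further down in this file:

    data = b"hello, data lake"
    # Stage the bytes at offset 0; nothing is committed yet.
    path_ops.append_data(body=data, position=0, content_length=len(data))
    # Commit: position equals the final file length and content_length must be 0.
    path_ops.flush_data(position=len(data), content_length=0, close=True)
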
- :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - action = "flush" - accept = "application/json" - - # Construct URL - url = self.flush_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is 
not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def append_data( - self, - body, # type: IO - position=None, # type: Optional[int] - timeout=None, # type: Optional[int] - content_length=None, # type: Optional[int] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Append data to the file. - - :param body: Initial data. - :type body: IO - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. 
The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _transactional_content_hash = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _transactional_content_hash = path_http_headers.transactional_content_hash - action = "append" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.append_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", 
_lease_id, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_expiry( - self, - expiry_options, # type: Union[str, "_models.PathExpiryOptions"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - expires_on=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
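
For illustration (``path_ops`` is assumed, and the option names are the usual ``PathExpiryOptions`` values, which are not listed in this file), expiry can be set relative to now in milliseconds or as an absolute RFC 1123 time:

    # Expire the file 24 hours from now (milliseconds, passed as a string).
    path_ops.set_expiry("RelativeToNow", expires_on=str(24 * 60 * 60 * 1000))
    # Or at an absolute time.
    path_ops.set_expiry("Absolute", expires_on="Tue, 01 Jul 2025 00:00:00 GMT")
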
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/json" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def undelete( - self, - timeout=None, # type: Optional[int] - undelete_source=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Undelete a path that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. - :type timeout: int - :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of - the soft deleted blob to undelete. 
- :type undelete_source: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/json" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if undelete_source is not None: - header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_service_operations.py deleted file mode 100644 index 2db3801..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_service_operations.py +++ /dev/null @@ -1,153 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def list_file_systems( - self, - prefix=None, # type: Optional[str] - continuation=None, # type: Optional[str] - max_results=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Iterable["_models.FileSystemList"] - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for Blob Service Operations.`. 
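
A minimal consumption sketch (``service_ops`` stands in for an instance of this class; the item attribute names are assumed from the generated ``FileSystem`` model). The returned ``ItemPaged`` follows continuation tokens on the caller's behalf:

    for fs in service_ops.list_file_systems(prefix="logs", max_results=100):
        print(fs.name, fs.last_modified)
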
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either FileSystemList or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.FileSystemList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - resource = "account" - accept = "application/json" - - def prepare_request(next_link=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_file_systems.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - def extract_data(pipeline_response): - deserialized = self._deserialize('FileSystemList', pipeline_response) - list_of_elem = deserialized.filesystems - if cls: - list_of_elem = cls(list_of_elem) - return None, iter(list_of_elem) - - def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged( - get_next, extract_data - ) - list_file_systems.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_list_paths_helper.py deleted file mode 100644 index 543e1e1..0000000 --- 
a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_list_paths_helper.py +++ /dev/null @@ -1,108 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code -from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized - - -class DeletedPathPropertiesPaged(PageIterator): - """An Iterable of deleted path properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A path name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties) - :ivar str container: The container that the paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(DeletedPathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - max_results=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobItemInternal): - file_props = get_deleted_path_properties_from_generated_code(item) - file_props.file_system = self.container - return file_props - if isinstance(item, GenBlobPrefix): - return 
DirectoryPrefix( - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class DirectoryPrefix(DictMixin): - """Directory prefix. - - :ivar str name: Name of the deleted directory. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar str file_system: The file system that the deleted paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.file_system = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_models.py deleted file mode 100644 index 4517f46..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_models.py +++ /dev/null @@ -1,1036 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from datetime import datetime -from enum import Enum - -from azure.multiapi.storagev2.blob.v2020_06_12 import LeaseProperties as BlobLeaseProperties -from azure.multiapi.storagev2.blob.v2020_06_12 import AccountSasPermissions as BlobAccountSasPermissions -from azure.multiapi.storagev2.blob.v2020_06_12 import ResourceTypes as BlobResourceTypes -from azure.multiapi.storagev2.blob.v2020_06_12 import UserDelegationKey as BlobUserDelegationKey -from azure.multiapi.storagev2.blob.v2020_06_12 import ContentSettings as BlobContentSettings -from azure.multiapi.storagev2.blob.v2020_06_12 import AccessPolicy as BlobAccessPolicy -from azure.multiapi.storagev2.blob.v2020_06_12 import DelimitedTextDialect as BlobDelimitedTextDialect -from azure.multiapi.storagev2.blob.v2020_06_12 import DelimitedJsonDialect as BlobDelimitedJSON -from azure.multiapi.storagev2.blob.v2020_06_12 import ArrowDialect as BlobArrowDialect -from azure.multiapi.storagev2.blob.v2020_06_12._models import ContainerPropertiesPaged -from azure.multiapi.storagev2.blob.v2020_06_12._generated.models import Logging as GenLogging, Metrics as GenMetrics, \ - RetentionPolicy as GenRetentionPolicy, StaticWebsite as GenStaticWebsite, CorsRule as GenCorsRule -from ._shared.models import DictMixin - - -class FileSystemProperties(object): - """File System properties class. - - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file system was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file system. - :ivar str public_access: Specifies whether data in the file system may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the file system has an immutability policy. 
- :ivar bool has_legal_hold: - Represents whether the file system has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - file system as metadata. - :ivar bool deleted: - Whether this file system was deleted. - :ivar str deleted_version: - The version of a deleted file system. - - Returned ``FileSystemProperties`` instances expose these values through a - dictionary interface, for example: ``file_system_props["last_modified"]``. - Additionally, the file system name is available as ``file_system_props["name"]``. - """ - - def __init__(self): - self.name = None - self.last_modified = None - self.etag = None - self.lease = None - self.public_access = None - self.has_immutability_policy = None - self.has_legal_hold = None - self.metadata = None - self.deleted = None - self.deleted_version = None - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.deleted = generated.deleted - props.deleted_version = generated.version - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - generated.properties.public_access) - props.has_immutability_policy = generated.properties.has_immutability_policy - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - return props - - @classmethod - def _convert_from_container_props(cls, container_properties): - container_properties.__class__ = cls - container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - container_properties.public_access) - container_properties.lease.__class__ = LeaseProperties - return container_properties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access - - -class DirectoryProperties(DictMixin): - """ - :ivar str name: name of the directory - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. 
- :ivar bool deleted: if the current directory marked as deleted - :ivar dict metadata: Name-value pairs associated with the directory as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the directory was created, in UTC. - :ivar int remaining_retention_days: The number of days that the directory will be retained - before being permanently deleted by the service. - :var ~azure.storage.filedatalake.ContentSettings content_settings: - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.etag = kwargs.get('ETag') - self.deleted = False - self.metadata = kwargs.get('metadata') - self.lease = LeaseProperties(**kwargs) - self.last_modified = kwargs.get('Last-Modified') - self.creation_time = kwargs.get('x-ms-creation-time') - self.deleted_time = None - self.remaining_retention_days = None - - -class FileProperties(DictMixin): - """ - :ivar str name: name of the file - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool deleted: if the current file marked as deleted - :ivar dict metadata: Name-value pairs associated with the file as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the file was created, in UTC. - :ivar int size: size of the file - :ivar int remaining_retention_days: The number of days that the file will be retained - before being permanently deleted by the service. - :var ~azure.storage.filedatalake.ContentSettings content_settings: - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.etag = kwargs.get('ETag') - self.deleted = False - self.metadata = kwargs.get('metadata') - self.lease = LeaseProperties(**kwargs) - self.last_modified = kwargs.get('Last-Modified') - self.creation_time = kwargs.get('x-ms-creation-time') - self.size = kwargs.get('Content-Length') - self.deleted_time = None - self.expiry_time = kwargs.get("x-ms-expiry-time") - self.remaining_retention_days = None - self.content_settings = ContentSettings(**kwargs) - - -class PathProperties(object): - """Path properties listed by get_paths api. - - :ivar str name: the full path for a file or directory. - :ivar str owner: The owner of the file or directory. - :ivar str group: he owning group of the file or directory. - :ivar str permissions: Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified. - :ivar bool is_directory: is the path a directory or not. - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar content_length: the size of file if the path is a file. 
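
A short sketch of consuming these properties (``paths`` is an assumed iterable of ``PathProperties``, e.g. items built via ``_from_generated`` below):

    for p in paths:
        kind = "dir " if p.is_directory else "file"
        print(kind, p.name, p.content_length, p.permissions, p.last_modified)
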
- """ - - def __init__(self, **kwargs): - self.name = kwargs.pop('name', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - self.last_modified = kwargs.get('last_modified', None) - self.is_directory = kwargs.get('is_directory', False) - self.etag = kwargs.get('etag', None) - self.content_length = kwargs.get('content_length', None) - - @classmethod - def _from_generated(cls, generated): - path_prop = PathProperties() - path_prop.name = generated.name - path_prop.owner = generated.owner - path_prop.group = generated.group - path_prop.permissions = generated.permissions - path_prop.last_modified = datetime.strptime(generated.last_modified, "%a, %d %b %Y %H:%M:%S %Z") - path_prop.is_directory = bool(generated.is_directory) - path_prop.etag = generated.additional_properties.get('etag') - path_prop.content_length = generated.content_length - return path_prop - - -class LeaseProperties(BlobLeaseProperties): - """DataLake Lease Properties. - - :ivar str status: - The lease status of the file. Possible values: locked|unlocked - :ivar str state: - Lease state of the file. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file is leased, specifies whether the lease is of infinite or fixed duration. - """ - - -class ContentSettings(BlobContentSettings): - """The content settings of a file or directory. - - :ivar str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :ivar str content_language: - If the content_language has previously been set - for the file, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :ivar str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :ivar bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - :keyword str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :keyword str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :keyword str content_language: - If the content_language has previously been set - for the file, that value is stored. - :keyword str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :keyword str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :keyword bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. 
- """ - - def __init__( - self, **kwargs): - super(ContentSettings, self).__init__( - **kwargs - ) - - -class AccountSasPermissions(BlobAccountSasPermissions): - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - create=False): - super(AccountSasPermissions, self).__init__( - read=read, create=create, write=write, list=list, - delete=delete - ) - - -class FileSystemSasPermissions(object): - """FileSystemSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_system_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool write: - Create or write content, properties, metadata. Lease the file system. - :param bool delete: - Delete the file system. - :param bool list: - List paths in the file system. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - **kwargs): - self.read = read - self.write = write - self.delete = delete - self.list = list - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSystemSasPermissions from a string. - - To specify read, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A FileSystemSasPermissions object - :rtype: ~azure.storage.fildatalake.FileSystemSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, write=p_write, delete=p_delete, - list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class DirectorySasPermissions(object): - """DirectorySasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_directory_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool create: - Create a new directory - :param bool write: - Create or write content, properties, metadata. Lease the directory. - :param bool delete: - Delete the directory. - :keyword bool list: - List any files in the directory. Implies Execute. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, create=False, write=False, - delete=False, **kwargs): - self.read = read - self.create = create - self.write = write - self.delete = delete - self.list = kwargs.pop('list', None) - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a DirectorySasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A DirectorySasPermissions object - :rtype: ~azure.storage.filedatalake.DirectorySasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, - list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_sas` function. - - :param bool read: - Read the content, properties, metadata etc. Use the file as - the source of a read operation. - :param bool create: - Write a new file - :param bool write: - Create or write content, properties, metadata. Lease the file. - :param bool delete: - Delete the file. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, create=False, write=False, delete=False, **kwargs): - self.read = read - self.create = create - self.write = write - self.delete = delete - self.list = list - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A FileSasPermissions object - :rtype: ~azure.storage.fildatalake.FileSasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, - move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class AccessPolicy(BlobAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.datalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - """ - - def __init__(self, permission=None, expiry=None, **kwargs): - super(AccessPolicy, self).__init__( - permission=permission, expiry=expiry, start=kwargs.pop('start', None) - ) - - -class ResourceTypes(BlobResourceTypes): - """ - Specifies the resource types that are accessible with the account SAS. 
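
For context, the SAS permission classes above (``FileSystemSasPermissions``, ``DirectorySasPermissions``, ``FileSasPermissions``) are normally paired with the module-level SAS generators referenced in their docstrings. A minimal sketch, assuming the public ``azure.storage.filedatalake`` package; the account name, file system name and key are placeholders, not values from this diff::

    from datetime import datetime, timedelta
    from azure.storage.filedatalake import (
        FileSystemSasPermissions,
        generate_file_system_sas,
    )

    # "rwdl" is expanded by from_string() into read/write/delete/list flags
    permission = FileSystemSasPermissions.from_string("rwdl")

    sas_token = generate_file_system_sas(
        account_name="myaccount",            # placeholder
        file_system_name="myfilesystem",     # placeholder
        credential="<base64-account-key>",   # placeholder
        permission=permission,
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
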
- - :param bool service: - Access to service-level APIs (e.g.List File Systems) - :param bool file_system: - Access to file_system-level APIs (e.g., Create/Delete file system, - List Directories/Files) - :param bool object: - Access to object-level APIs for - files(e.g. Create File, etc.) - """ - - def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin - ): - super(ResourceTypes, self).__init__(service=service, container=file_system, object=object) - - -class UserDelegationKey(BlobUserDelegationKey): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - - @classmethod - def _from_generated(cls, generated): - delegation_key = cls() - delegation_key.signed_oid = generated.signed_oid - delegation_key.signed_tid = generated.signed_tid - delegation_key.signed_start = generated.signed_start - delegation_key.signed_expiry = generated.signed_expiry - delegation_key.signed_service = generated.signed_service - delegation_key.signed_version = generated.signed_version - delegation_key.value = generated.value - return delegation_key - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the file system may be accessed publicly and the level of access. - """ - - File = 'blob' - """ - Specifies public read access for files. file data within this file system can be read - via anonymous request, but file system data is not available. Clients cannot enumerate - files within the container via anonymous request. - """ - - FileSystem = 'container' - """ - Specifies full public read access for file system and file data. Clients can enumerate - files within the file system via anonymous request, but cannot enumerate file systems - within the storage account. - """ - - @classmethod - def _from_generated(cls, public_access): - if public_access == "blob": # pylint:disable=no-else-return - return cls.File - elif public_access == "container": - return cls.FileSystem - - return None - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class DelimitedJsonDialect(BlobDelimitedJSON): - """Defines the input or output JSON serialization for a datalake query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - -class DelimitedTextDialect(BlobDelimitedTextDialect): - """Defines the input or output delimited (CSV) serialization for a datalake query request. - - :keyword str delimiter: - Column separator, defaults to ','. 
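
``ResourceTypes`` and the ``AccountSasPermissions`` shim above feed into account-level SAS generation. A rough sketch, assuming the package exposes a ``generate_account_sas`` helper analogous to the one in the blob SDK (it is not part of this diff); credentials are placeholders::

    from datetime import datetime, timedelta
    from azure.storage.filedatalake import (
        AccountSasPermissions,
        ResourceTypes,
        generate_account_sas,
    )

    sas_token = generate_account_sas(
        account_name="myaccount",                 # placeholder
        account_key="<base64-account-key>",       # placeholder
        resource_types=ResourceTypes(service=True, file_system=True, object=True),
        permission=AccountSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
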
- :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - - -class ArrowDialect(BlobArrowDialect): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param str type: Required. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. - """ - - -class QuickQueryDialect(str, Enum): - """Specifies the quick query input/output dialect.""" - - DelimitedText = 'DelimitedTextDialect' - DelimitedJson = 'DelimitedJsonDialect' - Parquet = 'ParquetDialect' - - -class ArrowType(str, Enum): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class DataLakeFileQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position - - -class AccessControlChangeCounters(DictMixin): - """ - AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively. - - :ivar int directories_successful: - Number of directories where Access Control List has been updated successfully. - :ivar int files_successful: - Number of files where Access Control List has been updated successfully. - :ivar int failure_count: - Number of paths where Access Control List update has failed. - """ - - def __init__(self, directories_successful, files_successful, failure_count): - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - - -class AccessControlChangeResult(DictMixin): - """ - AccessControlChangeResult contains result of operations that change Access Control Lists recursively. - - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters: - Contains counts of paths changed from start of the operation. - :ivar str continuation: - Optional continuation token. - Value is present when operation is split into multiple batches and can be used to resume progress. - """ - - def __init__(self, counters, continuation): - self.counters = counters - self.continuation = continuation - - -class AccessControlChangeFailure(DictMixin): - """ - Represents an entry that failed to update Access Control List. - - :ivar str name: - Name of the entry. - :ivar bool is_directory: - Indicates whether the entry is a directory. - :ivar str error_message: - Indicates the reason why the entry failed to update. 
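
The dialect and error models above drive the quick-query path. A sketch of how they are typically wired together, assuming ``DataLakeFileClient.query_file`` mirrors the blob quick-query API (the client construction, CSV layout and query text are illustrative placeholders)::

    from azure.storage.filedatalake import (
        DataLakeFileClient,
        DelimitedJsonDialect,
        DelimitedTextDialect,
    )

    file_client = DataLakeFileClient(
        "https://myaccount.dfs.core.windows.net",  # placeholder
        file_system_name="myfilesystem",           # placeholder
        file_path="data/input.csv",                # placeholder
        credential="<base64-account-key>",         # placeholder
    )

    input_format = DelimitedTextDialect(delimiter=",", quotechar='"', has_header=True)
    output_format = DelimitedJsonDialect(delimiter="\n")

    def on_error(error):
        # error is a DataLakeFileQueryError as defined above
        print(error.is_fatal, error.description, error.position)

    reader = file_client.query_file(
        "SELECT * from BlobStorage",
        file_format=input_format,
        output_format=output_format,
        on_error=on_error,
    )
    print(reader.readall())
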
- """ - - def __init__(self, name, is_directory, error_message): - self.name = name - self.is_directory = is_directory - self.error_message = error_message - - -class AccessControlChanges(DictMixin): - """ - AccessControlChanges contains batch and cumulative counts of operations - that change Access Control Lists recursively. - Additionally it exposes path entries that failed to update while these operations progress. - - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters: - Contains counts of paths changed within single batch. - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters: - Contains counts of paths changed from start of the operation. - :ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures: - List of path entries that failed to update Access Control List within single batch. - :ivar str continuation: - An opaque continuation token that may be used to resume the operations in case of failures. - """ - - def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation): - self.batch_counters = batch_counters - self.aggregate_counters = aggregate_counters - self.batch_failures = batch_failures - self.continuation = continuation - - -class DeletedPathProperties(DictMixin): - """ - Properties populated for a deleted path. - - :ivar str name: - The name of the file in the path. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the path was deleted. - :ivar int remaining_retention_days: - The number of days that the path will be retained before being permanently deleted by the service. - :ivar str deletion_id: - The id associated with the deleted path. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.deleted_time = None - self.remaining_retention_days = None - self.deletion_id = None - - -class AnalyticsLogging(GenLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GenMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Datalake service. 
- The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GenRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GenStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. - """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GenCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. 
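
``AnalyticsLogging``, ``Metrics`` and ``RetentionPolicy`` above mirror the blob service-properties models; note the ``days`` requirement enforced in ``RetentionPolicy.__init__``. A small sketch, assuming these models are importable from the package root::

    from azure.storage.filedatalake import AnalyticsLogging, Metrics, RetentionPolicy

    retention = RetentionPolicy(enabled=True, days=7)
    logging_settings = AnalyticsLogging(read=True, write=True, delete=True,
                                        retention_policy=retention)
    minute_metrics = Metrics(enabled=True, include_apis=True,
                             retention_policy=retention)

    # Enabling retention without 'days' is rejected by the validation shown above.
    try:
        RetentionPolicy(enabled=True)
    except ValueError as exc:
        print(exc)
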
- - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_path_client.py deleted file mode 100644 index d0bbeb3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_path_client.py +++ /dev/null @@ -1,902 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from datetime import datetime -from typing import Any, Dict, Union - -try: - from urllib.parse import urlparse, quote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.exceptions import AzureError, HttpResponseError -from azure.multiapi.storagev2.blob.v2020_06_12 import BlobClient -from ._data_lake_lease import DataLakeLeaseClient -from ._deserialize import process_storage_error -from ._generated import AzureDataLakeStorageRESTAPI -from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \ - AccessControlChangeCounters, AccessControlChangeFailure -from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \ - get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions, \ - get_api_version -from ._shared.base_client import StorageAccountHostsMixin, parse_query -from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(StorageAccountHostsMixin): - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - # remove the preceding/trailing delimiter from the path components - file_system_name = file_system_name.strip('/') - - # the name of root directory is / - if path_name != '/': - path_name = path_name.strip('/') - - if not (file_system_name and path_name): - raise ValueError("Please specify a file system name and file path.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._blob_client = BlobClient(blob_account_url, file_system_name, path_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self.path_name = path_name - - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - api_version = get_api_version(kwargs) - - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name, - pipeline=self._pipeline) - self._client._config.version = api_version # pylint: disable=protected-access - - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI( - self._blob_client.url, - file_system=file_system_name, - 
path=path_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - def __exit__(self, *args): - self._blob_client.close() - super(PathClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._blob_client.close() - self.__exit__() - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - quote(self.path_name, safe='~'), - self._query_str) - - def _create_path_options(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'resource': resource_type, - 'properties': add_metadata_headers(metadata), - 'permissions': kwargs.pop('permissions', None), - 'umask': kwargs.pop('umask', None), - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. 
- Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _delete_path_options(**kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - def _delete(self, **kwargs): - # type: (**Any) -> Dict[Union[datetime, str]] - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :param ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :param int timeout: - The timeout parameter is expressed in seconds. 
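
The umask rule quoted in the ``_create`` docstring (the resulting permission is ``p & ^u``) is ordinary octal bit-masking; a quick illustration of the documented 0777/0057 example::

    p = 0o777            # requested permission
    u = 0o057            # umask
    print(oct(p & ~u))   # 0o720, matching the example in the docstring above
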
- :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return self._client.path.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'owner': owner, - 'group': group, - 'permissions': permissions, - 'acl': acl, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). 
- """ - if not any([owner, group, permissions, acl]): - raise ValueError("At least one parameter should be set for set_access_control API") - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return self._client.path.set_access_control(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _get_access_control_options(upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'action': 'getAccessControl', - 'upn': upn if upn else False, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - :param upn: Optional. - Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. 
- """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return self._client.path.get_properties(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_recursive_options(mode, acl, **kwargs): - # type: (str, str, **Any) -> Dict[str, Any] - - options = { - 'mode': mode, - 'force_flag': kwargs.pop('continue_on_failure', None), - 'timeout': kwargs.pop('timeout', None), - 'continuation': kwargs.pop('continuation_token', None), - 'max_records': kwargs.pop('batch_size', None), - 'acl': acl, - 'cls': return_headers_and_deserialized} - options.update(kwargs) - return options - - def set_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Sets the Access Control on a path and sub-paths. - - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def update_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Modifies the Access Control on a path and sub-paths. 
- - :param acl: - Modifies POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def remove_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Removes the Access Control on a path and sub-paths. - - :param acl: - Removes POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, and a user or - group identifier in the format "[scope:][type]:[id]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. 
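
The three recursive ACL operations above share the same batching and resumption contract (``progress_hook``, ``batch_size``, ``max_batches``, ``continuation_token``). A sketch of typical usage with the public ``DataLakeDirectoryClient``; account values are placeholders::

    from azure.core.exceptions import AzureError
    from azure.storage.filedatalake import DataLakeDirectoryClient

    directory = DataLakeDirectoryClient(
        "https://myaccount.dfs.core.windows.net", "myfilesystem", "mydir",
        credential="<base64-account-key>",  # placeholder
    )

    def report(changes):
        # 'changes' is an AccessControlChanges carrying batch and aggregate counters
        print("files updated so far:", changes.aggregate_counters.files_successful,
              "failures in this batch:", len(changes.batch_failures))

    acl = "user::rwx,group::r-x,other::---"
    try:
        result = directory.set_access_control_recursive(
            acl=acl, progress_hook=report, batch_size=2000)
        print("failed paths:", result.counters.failure_count)
    except AzureError as error:
        # As documented above, a continuation_token may be attached to the error;
        # passing it back via continuation_token= resumes the operation.
        print("interrupted, resume token:", getattr(error, "continuation_token", None))
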
- The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed then, - continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def _set_access_control_internal(self, options, progress_hook, max_batches=None): - try: - continue_on_failure = options.get('force_flag') - total_directories_successful = 0 - total_files_success = 0 - total_failure_count = 0 - batch_count = 0 - last_continuation_token = None - current_continuation_token = None - continue_operation = True - while continue_operation: - headers, resp = self._client.path.set_access_control_recursive(**options) - - # make a running tally so that we can report the final results - total_directories_successful += resp.directories_successful - total_files_success += resp.files_successful - total_failure_count += resp.failure_count - batch_count += 1 - current_continuation_token = headers['continuation'] - - if current_continuation_token is not None: - last_continuation_token = current_continuation_token - - if progress_hook is not None: - progress_hook(AccessControlChanges( - batch_counters=AccessControlChangeCounters( - directories_successful=resp.directories_successful, - files_successful=resp.files_successful, - failure_count=resp.failure_count, - ), - aggregate_counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count, - ), - batch_failures=[AccessControlChangeFailure( - name=failure.name, - is_directory=failure.type == 'DIRECTORY', - error_message=failure.error_message) for failure in resp.failed_entries], - continuation=last_continuation_token)) - - # update the continuation token, if there are more operations that cannot be completed in a single call - max_batches_satisfied = (max_batches is not None and batch_count == max_batches) - continue_operation = bool(current_continuation_token) and not max_batches_satisfied - options['continuation'] = current_continuation_token - - # currently the service stops on 
any failure, so we should send back the last continuation token - # for the user to retry the failed updates - # otherwise we should just return what the service gave us - return AccessControlChangeResult(counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count), - continuation=last_continuation_token - if total_failure_count > 0 and not continue_on_failure else current_continuation_token) - except HttpResponseError as error: - error.continuation_token = last_continuation_token - process_storage_error(error) - except AzureError as error: - error.continuation_token = last_continuation_token - raise error - - def _rename_path_options(self, rename_source, content_settings=None, metadata=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None): - raise ValueError("metadata, permissions, umask is not supported for this operation") - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - source_lease_id = get_lease_id(kwargs.pop('source_lease', None)) - mod_conditions = get_mod_conditions(kwargs) - source_mod_conditions = get_source_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'rename_source': rename_source, - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'source_lease_id': source_lease_id, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'mode': 'legacy', - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _rename_path(self, rename_source, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: - The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../tests/test_blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a file/directory. - """ - path_properties = self._blob_client.get_blob_properties(**kwargs) - return path_properties - - def _exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a path exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._blob_client.exists(**kwargs) - - def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - """ - return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. 
- :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_quick_query_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_quick_query_helper.py deleted file mode 100644 index ff67d27..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_quick_query_helper.py +++ /dev/null @@ -1,71 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import Union, Iterable, IO # pylint: disable=unused-import - - -class DataLakeFileQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. - """ - - def __init__( - self, - blob_query_reader - ): - self.name = blob_query_reader.name - self.file_system = blob_query_reader.container - self.response_headers = blob_query_reader.response_headers - self.record_delimiter = blob_query_reader.record_delimiter - self._bytes_processed = 0 - self._blob_query_reader = blob_query_reader - - def __len__(self): - return len(self._blob_query_reader) - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - return self._blob_query_reader.readall() - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - self._blob_query_reader(stream) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - return self._blob_query_reader.records() diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_serialize.py deleted file mode 100644 index 88fac96..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_serialize.py +++ /dev/null @@ -1,111 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from azure.multiapi.storagev2.blob.v2020_06_12._serialize import _get_match_headers # pylint: disable=protected-access -from ._shared import encode_base64 -from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \ - SourceModifiedAccessConditions, LeaseAccessConditions - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08', - '2020-06-12', - '2020-08-04', - '2020-10-02' -] - - -def get_api_version(kwargs): - # type: (Dict[str, Any], str) -> str - api_version = kwargs.get('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or _SUPPORTED_API_VERSIONS[-1] - - -def convert_dfs_url_to_blob_url(dfs_account_url): - return dfs_account_url.replace('.dfs.', '.blob.', 1) - - -def convert_datetime_to_rfc1123(date): - weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()] - month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", - "Oct", "Nov", "Dec"][date.month - 1] - return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, date.day, month, - date.year, date.hour, date.minute, date.second) - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> str - headers = list() - if metadata: - for key, value in metadata.items(): - headers.append(key + '=') - headers.append(encode_base64(value)) - headers.append(',') - - if headers: - del headers[-1] - - return ''.join(headers) - - -def get_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None) - ) - - -def get_source_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_path_http_headers(content_settings): - path_headers = PathHTTPHeaders( - cache_control=content_settings.cache_control, - content_type=content_settings.content_type, - content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - content_encoding=content_settings.content_encoding, - content_language=content_settings.content_language, - content_disposition=content_settings.content_disposition - ) - return path_headers - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_lease_id(lease): - if not lease: - return "" - 
try: - lease_id = lease.id - except AttributeError: - lease_id = lease - return lease_id diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . 
import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. - """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 
'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client.py deleted file mode 100644 index 5e524b2..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client.py +++ /dev/null @@ -1,459 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
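The account and host parsing above is plain string manipulation on the endpoint URL. A self-contained approximation (not the vendored code itself), assuming the conventional ``<account>-secondary`` naming for the secondary endpoint:

.. code-block:: python

    from urllib.parse import urlparse

    def derive_hosts(account_url, service="dfs"):
        """Return (primary, secondary) hostnames for a storage account URL."""
        parsed = urlparse(account_url)
        # "myaccount.dfs.core.windows.net" splits into ["myaccount", "windows.net"]
        account = parsed.netloc.split(".{}.core.".format(service))
        primary = (parsed.netloc + parsed.path).rstrip("/")
        secondary = None
        if len(account) > 1:
            secondary = parsed.netloc.replace(account[0], account[0] + "-secondary")
        return primary, secondary

    print(derive_hosts("https://myaccount.dfs.core.windows.net"))
    # ('myaccount.dfs.core.windows.net', 'myaccount-secondary.dfs.core.windows.net')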
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client_async.py deleted file mode 100644 index 091c350..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client_async.py +++ /dev/null @@ -1,183 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. 
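For orientation, ``parse_connection_str`` above boils down to splitting ``key=value`` pairs and assembling an endpoint URL. A self-contained approximation of that behaviour (not the helper itself), assuming the standard connection-string keys:

.. code-block:: python

    def split_connection_string(conn_str):
        settings = {}
        for segment in conn_str.rstrip(";").split(";"):
            key, _, value = segment.partition("=")
            settings[key.upper()] = value
        return settings

    conn = ("DefaultEndpointsProtocol=https;AccountName=myaccount;"
            "AccountKey=<base64-key>;EndpointSuffix=core.windows.net")
    settings = split_connection_string(conn)
    primary = "{}://{}.dfs.{}".format(
        settings["DEFAULTENDPOINTSPROTOCOL"],
        settings["ACCOUNTNAME"],
        settings["ENDPOINTSUFFIX"],
    )
    print(primary)  # https://myaccount.dfs.core.windows.net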
- It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/constants.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/constants.py deleted file mode 100644 index a50e8b5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -from .._generated import AzureDataLakeStorageRESTAPI - - -X_MS_VERSION = AzureDataLakeStorageRESTAPI(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. 
- if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. 
- wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. 
- :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. 
This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. 
- :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. 
- padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/models.py deleted file mode 100644 index 0aeb96a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/models.py +++ /dev/null @@ -1,468 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.filedatalake.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. 
- - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies.py deleted file mode 100644 index 11fc984..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies.py +++ /dev/null @@ -1,608 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. 
- - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/request_handlers.py deleted file mode 100644 index 37354d7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/request_handlers.py +++ /dev/null @@ -1,273 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
<header key>: <header value>
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/response_handlers.py deleted file mode 100644 index 32a923f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/response_handlers.py +++ /dev/null @@ -1,192 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. 
- if isinstance(storage_error, (PartialBatchErrorException, - ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type % from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error - if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error - - -def 
parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - 
QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads.py deleted file mode 100644 index 1b619df..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads.py +++ /dev/null @@ -1,602 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in 
islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def 
_upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
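- # A zero-byte relative seek doubles as a seekability probe: it is a no-op on seekable streams and raises on anything else.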
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. 
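- # No lock is held on this single-reader path, so only reposition the wrapped stream when it has drifted from the expected offset.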
- if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared_access_signature.py deleted file mode 100644 index ba27198..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared_access_signature.py +++ /dev/null @@ -1,391 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING - -from azure.multiapi.storagev2.blob.v2020_06_12 import generate_account_sas as generate_blob_account_sas -from azure.multiapi.storagev2.blob.v2020_06_12 import generate_container_sas, generate_blob_sas -if TYPE_CHECKING: - import datetime - from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \ - UserDelegationKey - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the DataLake service. - - Use the returned signature as the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The access key to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. 
- :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - return generate_blob_account_sas( - account_name=account_name, - account_key=account_key, - resource_types=resource_types, - permission=permission, - expiry=expiry, - **kwargs - ) - - -def generate_file_system_sas( - account_name, # type: str - file_system_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSystemSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file system. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. 
- Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - """ - return generate_container_sas( - account_name=account_name, - container_name=file_system_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) - - -def generate_directory_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a directory. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. 
- :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - depth = len(directory_name.strip("/").split("/")) - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=directory_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - sdd=depth, - is_directory=True, - **kwargs) - - -def generate_file_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - file_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str file_name: - The name of the file. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
- When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. 
- :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. This can only be used when to generate sas with delegation key. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - if directory_name: - path = directory_name.rstrip('/') + "/" + file_name - else: - path = file_name - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=path, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_upload_helper.py deleted file mode 100644 index 6d88c32..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_upload_helper.py +++ /dev/null @@ -1,104 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from ._deserialize import ( - process_storage_error) -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import ( - upload_data_chunks, - DataLakeFileChunkUploader, upload_substream_blocks) -from azure.core.exceptions import HttpResponseError - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - file_settings=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - use_original_upload_path = 
file_settings.use_byte_buffer or \ - validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - else: - upload_substream_blocks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - return client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - close=True, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_version.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_version.py deleted file mode 100644 index d731da5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.5.0" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/__init__.py deleted file mode 100644 index c24dde8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._download_async import StorageStreamDownloader -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._file_system_client_async import FileSystemClient -from ._data_lake_service_client_async import DataLakeServiceClient -from ._data_lake_lease_async import DataLakeLeaseClient - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeDirectoryClient', - 'DataLakeFileClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py deleted file mode 100644 index 6254efb..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py +++ /dev/null @@ -1,553 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Any - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore -from azure.core.pipeline import AsyncPipeline -from ._data_lake_file_client_async import DataLakeFileClient -from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase -from .._models import DirectoryProperties, FileProperties -from .._deserialize import deserialize_dir_properties -from ._path_client_async import PathClient -from .._shared.base_client_async import AsyncTransportWrapper - - -class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call - credential=credential, **kwargs) - - async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return await self._create('directory', metadata=metadata, **kwargs) - - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._exists(**kwargs) - - async def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return await self._delete(recursive=True, **kwargs) - - async def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access - - async def rename_directory(self, new_name, # type: str - **kwargs): - # type: (**Any) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. 
- The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory.
- """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new directory") - if not self._raw_credential and new_file_system == self.file_system_name: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = DataLakeDirectoryClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, - credential=self._raw_credential or new_dir_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_directory_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_directory_client - - async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.delete_directory(**kwargs) - return subdir - - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. 
- :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_file_client] - :end-before: [END bsc_get_file_client] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file.get('name') - except AttributeError: - file_path = self.path_name + '/' + str(file) - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_directory_client] - :end-before: [END bsc_get_directory_client] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - subdir_path = sub_directory.get('name') - except AttributeError: - subdir_path = self.path_name + '/' + str(sub_directory) - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py deleted file mode 100644 index df25ecf..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py +++ /dev/null @@ -1,574 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information.
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Any -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -from azure.core.exceptions import HttpResponseError -from ._download_async import StorageStreamDownloader -from ._path_client_async import PathClient -from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase -from .._serialize import convert_datetime_to_rfc1123 -from .._deserialize import process_storage_error, deserialize_file_properties -from .._models import FileProperties -from ..aio._upload_helper import upload_datalake_file - - -class DataLakeFileClient(PathClient, DataLakeFileClientBase): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - async def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. 
- When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._exists(**kwargs) - - async def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
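[Editorial note] The `create_file` options documented above (content settings, metadata, POSIX permissions, umask) are easiest to read in context; this hedged sketch shows a typical create-then-delete sequence. All values are illustrative, and the import assumes the equivalent upstream async package.

from azure.storage.filedatalake import ContentSettings
from azure.storage.filedatalake.aio import DataLakeFileClient

async def create_and_delete(file_client: DataLakeFileClient) -> None:
    # Create the file with POSIX permissions further restricted by a umask
    # (both values are illustrative; they require a hierarchical namespace).
    await file_client.create_file(
        content_settings=ContentSettings(content_type="text/csv"),
        metadata={"category": "test"},
        permissions="rwxr-x---",
        umask="0027")
    if await file_client.exists():
        # delete_file accepts the same lease / conditional-header keywords
        # documented above; none are needed for this simple case.
        await file_client.delete_file()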
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return await self._delete(**kwargs) - - async def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - return await self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access - - async def set_file_expiry(self, expiry_options, # type: str - expires_on=None, # type: Optional[Union[datetime, int]] - **kwargs): - # type: (str, Optional[Union[datetime, int]], **Any) -> None - """Sets the time a file will expire and be deleted. - - :param str expiry_options: - Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' - :param datetime or int expires_on: - The time to set the file to expiry. 
- When expiry_options is RelativeTo*, expires_on should be an int in milliseconds - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - expires_on = convert_datetime_to_rfc1123(expires_on) - except AttributeError: - expires_on = str(expires_on) - await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on, - **kwargs) # pylint: disable=protected-access - - async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
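[Editorial note] A brief sketch of the `get_file_properties` and `set_file_expiry` calls described above. Per the docstring, an "Absolute" expiry takes a datetime while the RelativeTo* modes take an offset in milliseconds; the attribute names printed here are assumed from the FileProperties model.

from datetime import datetime, timedelta, timezone
from azure.storage.filedatalake.aio import DataLakeFileClient

async def inspect_and_expire(file_client: DataLakeFileClient) -> None:
    props = await file_client.get_file_properties()
    print(props.name, props.size, props.last_modified)

    # Absolute expiry: delete the file automatically one week from now.
    await file_client.set_file_expiry(
        "Absolute",
        expires_on=datetime.now(timezone.utc) + timedelta(days=7))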
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return await upload_datalake_file(**options) - - async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data, - offset, - length=length, - **kwargs) - try: - return await self._client.path.append_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. - - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. 
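[Editorial note] The upload path has two shapes, both documented above: a one-shot `upload_data`, or an explicit `append_data` followed by `flush_data` at the final offset. A minimal sketch of both, with placeholder data:

from azure.storage.filedatalake.aio import DataLakeFileClient

async def write_file(file_client: DataLakeFileClient, data: bytes) -> None:
    # One-shot upload: creates or overwrites the file in a single call.
    await file_client.upload_data(data, overwrite=True)

    # Manual pattern: append one or more ranges, then commit them by
    # flushing at the total committed length.
    await file_client.create_file()
    await file_client.append_data(data, offset=0, length=len(data))
    await file_client.flush_data(len(data))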
This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 12 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return await self._client.path.flush_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - async def rename_file(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
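[Editorial note] `download_file` returns the StorageStreamDownloader described above rather than raw bytes; reading requires a second call. A small sketch, with the range values purely illustrative:

from azure.storage.filedatalake.aio import DataLakeFileClient

async def read_first_kilobyte(file_client: DataLakeFileClient) -> bytes:
    # Download a 1 KiB slice; omit offset/length to stream the whole file.
    downloader = await file_client.download_file(offset=0, length=1024)
    return await downloader.readall()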
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_file_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_file_sas = self._query_str.strip('?') - - new_file_client = DataLakeFileClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, - credential=self._raw_credential or new_file_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_file_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_file_client diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_lease_async.py deleted file mode 100644 index a5e4ccc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_lease_async.py +++ /dev/null @@ -1,243 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
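[Editorial note] The `rename_file` docstring above requires the new name to be addressed from the file system root ("{filesystem}/{directory}/{file}"). A hedged sketch of a same-file-system rename; the target path is a placeholder, and a SAS for the destination could be appended after "?" when renaming across file systems.

from azure.storage.filedatalake.aio import DataLakeFileClient

async def move_to_archive(file_client: DataLakeFileClient) -> DataLakeFileClient:
    # file_system_name is an attribute of the source client; the rest of
    # the destination path is illustrative.
    new_name = file_client.file_system_name + "/archive/renamed.csv"
    return await file_client.rename_file(new_name)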
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobLeaseClient -from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase - - -if TYPE_CHECKING: - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(DataLakeLeaseClientBase): - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.aio.FileSystemClient or - ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - super(DataLakeLeaseClient, self).__init__(client, lease_id) - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. 
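[Editorial note] The lease client deleted here wraps the blob lease client, so the usual acquire/renew/release cycle applies. A sketch against a FileSystemClient, assuming `set_file_system_metadata` as the leased operation and a 30-second duration; both are placeholders.

from azure.storage.filedatalake.aio import FileSystemClient

async def lease_cycle(file_system: FileSystemClient) -> None:
    # Acquire a 30-second lease; -1 would request an infinite lease.
    lease = await file_system.acquire_lease(lease_duration=30)
    try:
        await lease.renew()
        # While the lease is active, mutating calls must present it.
        await file_system.set_file_system_metadata(
            {"owner": "data-team"}, lease=lease)
    finally:
        await lease.release()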
If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py deleted file mode 100644 index 905cd3b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py +++ /dev/null @@ -1,507 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Optional, Any, Dict - -from azure.core.paging import ItemPaged -from azure.core.pipeline import AsyncPipeline - -from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobServiceClient -from .._serialize import get_api_version -from .._generated.aio import AzureDataLakeStorageRESTAPI -from .._deserialize import get_datalake_service_properties -from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin -from ._file_system_client_async import FileSystemClient -from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase -from .._shared.policies_async import ExponentialRetry -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_file_client_async import DataLakeFileClient -from ._models import FileSystemPropertiesPaged -from .._models import UserDelegationKey, LocationMode - - -class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. 
- :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(DataLakeServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs - ) - self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) #pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - async def __aenter__(self): - await self._blob_service_client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._blob_service_client.close() - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_service_client.close() - - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. 
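[Editorial note] `get_user_delegation_key`, documented above, only works with a token credential (not a SAS or account key). A minimal sketch using `DefaultAzureCredential` from azure-identity; the one-hour validity window is arbitrary.

from datetime import datetime, timedelta, timezone
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake.aio import DataLakeServiceClient

async def fetch_delegation_key(account_url: str):
    async with DataLakeServiceClient(
            account_url, credential=DefaultAzureCredential()) as service:
        now = datetime.now(timezone.utc)
        # The key can then be used to sign user-delegation SAS tokens.
        return await service.get_user_delegation_key(now, now + timedelta(hours=1))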
- """ - delegation_key = await self._blob_service_client.get_user_delegation_key( - key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool include_deleted: - Specifies that deleted file systems to be returned in the response. This is for file system restore enabled - account. The default value is `False`. - .. versionadded:: 12.3.0 - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. - """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - async def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. 
- """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - async def _rename_file_system(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str name: - The name of the filesystem to rename. - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - await self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = self.get_file_system_client(new_name) - return renamed_file_system - - async def undelete_file_system(self, name, deleted_version, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Restores soft-deleted filesystem. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.3.0 - This operation was introduced in API version '2019-12-12'. - - :param str name: - Specifies the name of the deleted filesystem to restore. - :param str deleted_version: - Specifies the version of the deleted filesystem to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - new_name = kwargs.pop('new_name', None) - await self._blob_service_client.undelete_container(name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access - file_system = self.get_file_system_client(new_name or name) - return file_system - - async def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, - _pipeline=self._pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - async def set_service_properties(self, **kwargs): - # type: (**Any) -> None - """Sets the properties of a storage account's Datalake service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword analytics_logging: - Groups the Azure Analytics Logging settings. 
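[Editorial note] The service-level accessors above (`get_file_system_client`, `get_directory_client`, `get_file_client`) construct clients locally without any network call. A short sketch; the file system and path names are placeholders.

from azure.storage.filedatalake.aio import DataLakeServiceClient

def clients_for_path(service: DataLakeServiceClient):
    # These factories only build clients; the addressed file system,
    # directory and file may or may not exist yet.
    directory = service.get_directory_client("my-file-system", "raw/2024")
    file_client = service.get_file_client("my-file-system", "raw/2024/events.json")
    return directory, file_client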
- :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging - :keyword hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates. - :type hour_metrics: ~azure.storage.filedatalake.Metrics - :keyword minute_metrics: - The minute metrics settings provide request statistics - for each minute. - :type minute_metrics: ~azure.storage.filedatalake.Metrics - :keyword cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.filedatalake.CorsRule] - :keyword str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :keyword delete_retention_policy: - The delete retention policy specifies whether to retain deleted files/directories. - It also specifies the number of days and versions of file/directory to keep. - :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy - :keyword static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.filedatalake.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - return await self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access - - async def get_service_properties(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Gets the properties of a storage account's datalake service, including - Azure Storage Analytics. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing datalake service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - """ - props = await self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access - return get_datalake_service_properties(props) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_download_async.py deleted file mode 100644 index 5685478..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_download_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import AsyncIterator - -from .._deserialize import from_blob_properties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. 
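[Editorial note] A hedged sketch of the `set_service_properties` / `get_service_properties` pair documented above. The CORS rule and retention period are illustrative, and the dictionary keys read back are assumed to mirror the keyword names listed in the docstring.

from azure.storage.filedatalake import CorsRule, RetentionPolicy
from azure.storage.filedatalake.aio import DataLakeServiceClient

async def configure_service(service: DataLakeServiceClient) -> None:
    await service.set_service_properties(
        delete_retention_policy=RetentionPolicy(enabled=True, days=7),
        cors=[CorsRule(allowed_origins=["https://contoso.com"],
                       allowed_methods=["GET"])])
    # get_service_properties returns a plain dict; key names assumed here.
    props = await service.get_service_properties()
    print(props["delete_retention_policy"].days)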
- """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - """ - return self._downloader.chunks() - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return await self._downloader.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_file_system_client_async.py deleted file mode 100644 index b78bf6a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_file_system_client_async.py +++ /dev/null @@ -1,874 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncItemPaged - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.multiapi.storagev2.blob.v2020_06_12.aio import ContainerClient -from .._serialize import get_api_version -from .._deserialize import process_storage_error, is_file_path -from .._generated.models import ListBlobsIncludeItem - -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_lease_async import DataLakeLeaseClient -from .._deserialize import deserialize_path_properties -from .._file_system_client import FileSystemClient as FileSystemClientBase -from .._generated.aio import AzureDataLakeStorageRESTAPI -from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._models import FileSystemProperties, PublicAccess, DirectoryProperties, FileProperties, DeletedPathProperties -from ._list_paths_helper import DeletedPathPropertiesPaged - - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings) - - -class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase): - """A client to interact with a specific file system, even if that file system - may not yet exist. 
- - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(FileSystemClient, self).__init__( - account_url, - file_system_name=file_system_name, - credential=credential, - **kwargs) - # to override the class field _container_client sync version - kwargs.pop('_hosts', None) - self._container_client = ContainerClient(self._blob_account_url, file_system_name, - credential=credential, - _hosts=self._container_client._hosts,# pylint: disable=protected-access - **kwargs) # type: ignore # pylint: disable=protected-access - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline) - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url, - file_system=file_system_name, - pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._container_client.close() - await super(FileSystemClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._container_client.close() - await self.__aexit__() - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. 
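Given the constructor parameters and close() semantics above, a FileSystemClient is usually built straight from the account URL and used as an async context manager so the wrapped container client's sockets are released; a minimal sketch with placeholder names:

import asyncio

from azure.storage.filedatalake.aio import FileSystemClient

ACCOUNT_URL = "https://<account>.dfs.core.windows.net"   # placeholder
SAS_TOKEN = "<sas-token>"                                 # placeholder

async def main():
    # The credential may be a SAS token string, an account key, or a token credential.
    async with FileSystemClient(ACCOUNT_URL, "my-filesystem", credential=SAS_TOKEN) as fs:
        props = await fs.get_file_system_properties()
        print(props.name, props.last_modified)

asyncio.run(main())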
- - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the file_system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 16 - :caption: Creating a file system in the datalake service. 
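acquire_lease hands back a DataLakeLeaseClient that can then be passed to operations accepting a lease keyword, such as delete_file_system; a hedged sketch with placeholder names:

import asyncio

from azure.storage.filedatalake.aio import FileSystemClient

ACCOUNT_URL = "https://<account>.dfs.core.windows.net"   # placeholder
ACCOUNT_KEY = "<account-key>"                             # placeholder

async def main():
    async with FileSystemClient(ACCOUNT_URL, "scratch-fs", credential=ACCOUNT_KEY) as fs:
        await fs.create_file_system(metadata={"Category": "test"})

        # Take an infinite lease (the default); deletion now requires presenting it.
        lease = await fs.acquire_lease()
        await fs.delete_file_system(lease=lease)

asyncio.run(main())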
- """ - return await self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file system exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._container_client.exists(**kwargs) - - @distributed_trace_async - async def _rename_file_system(self, new_name, **kwargs): - # type: (str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - await self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = FileSystemClient( - "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - return renamed_file_system - - @distributed_trace_async - async def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 16 - :caption: Deleting a file system in the datalake service. - """ - await self._container_client.delete_container(**kwargs) - - @distributed_trace_async - async def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the file system. - """ - container_properties = await self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - @distributed_trace_async - async def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: file system-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - return await self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - @distributed_trace_async - async def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return await self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - @distributed_trace_async - async def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, get_file_system_access_policy only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. 
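The metadata and access-policy setters above delegate to the container equivalents; the sketch below uses the AccessPolicy and FileSystemSasPermissions models referenced in the docstrings, with a made-up policy id and placeholder account details:

import asyncio
from datetime import datetime, timedelta, timezone

from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions
from azure.storage.filedatalake.aio import FileSystemClient

ACCOUNT_URL = "https://<account>.dfs.core.windows.net"   # placeholder
ACCOUNT_KEY = "<account-key>"                             # placeholder

async def main():
    async with FileSystemClient(ACCOUNT_URL, "my-filesystem", credential=ACCOUNT_KEY) as fs:
        # Replace all metadata on the file system.
        await fs.set_file_system_metadata({"category": "test"})

        # One stored access policy ("read-only" is an arbitrary id) granting read for a day.
        policy = AccessPolicy(
            permission=FileSystemSasPermissions(read=True),
            expiry=datetime.now(timezone.utc) + timedelta(days=1))
        await fs.set_file_system_access_policy({"read-only": policy}, public_access=None)

        acl = await fs.get_file_system_access_policy()
        print(acl["public_access"], acl["signed_identifiers"])

asyncio.run(main())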
- :rtype: dict[str, Any] - """ - access_policy = await self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - @distributed_trace - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> AsyncItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: - An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 12 - :caption: List the blobs in the file system. - """ - timeout = kwargs.pop('timeout', None) - return self._client.file_system.list_paths( - recursive=recursive, - max_results=max_results, - path=path, - timeout=timeout, - cls=deserialize_path_properties, - **kwargs) - - @distributed_trace_async - async def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. 
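get_paths returns an async paged iterable, so enumerating everything under a prefix looks roughly like this (placeholders as before):

import asyncio

from azure.storage.filedatalake.aio import FileSystemClient

ACCOUNT_URL = "https://<account>.dfs.core.windows.net"   # placeholder
ACCOUNT_KEY = "<account-key>"                             # placeholder

async def main():
    async with FileSystemClient(ACCOUNT_URL, "my-filesystem", credential=ACCOUNT_KEY) as fs:
        # Walk everything under "raw", following continuation tokens lazily.
        async for path in fs.get_paths(path="raw", recursive=True):
            kind = "dir " if path.is_directory else "file"
            print(kind, path.name)

asyncio.run(main())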
- For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - @distributed_trace_async - async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
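The umask rule quoted above amounts to permissions & ~umask (0o777 & ~0o057 == 0o720 in Python), and both values can be supplied when creating a directory; a hedged sketch with placeholder names:

import asyncio

from azure.storage.filedatalake.aio import FileSystemClient

ACCOUNT_URL = "https://<account>.dfs.core.windows.net"   # placeholder
ACCOUNT_KEY = "<account-key>"                             # placeholder

# The effective mode is permissions & ~umask, as described in the docstring.
assert 0o777 & ~0o057 == 0o720

async def main():
    async with FileSystemClient(ACCOUNT_URL, "my-filesystem", credential=ACCOUNT_KEY) as fs:
        directory_client = await fs.create_directory(
            "raw/2021", metadata={"owner": "etl"}, permissions="0777", umask="0057")
        print(directory_client.url)

asyncio.run(main())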
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.delete_directory(**kwargs) - return directory_client - - @distributed_trace_async - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 12 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - @distributed_trace_async - async def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 12 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.delete_file(**kwargs) - return file_client - - @distributed_trace_async - async def _undelete_path(self, deleted_path_name, deletion_id, **kwargs): - # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient] - """Restores soft-deleted path. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. 
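create_file and delete_file are thin helpers over get_file_client; a rough end-to-end sketch follows, where upload_data on the returned DataLakeFileClient is assumed to be available in this API version:

import asyncio

from azure.storage.filedatalake.aio import FileSystemClient

ACCOUNT_URL = "https://<account>.dfs.core.windows.net"   # placeholder
ACCOUNT_KEY = "<account-key>"                             # placeholder

async def main():
    async with FileSystemClient(ACCOUNT_URL, "my-filesystem", credential=ACCOUNT_KEY) as fs:
        # Create (or recreate) an empty file, then write a small payload to it.
        file_client = await fs.create_file("raw/2021/report.txt")
        await file_client.upload_data(b"hello datalake", overwrite=True)

        # Remove it again; the returned client could be reused if needed.
        await fs.delete_file("raw/2021/report.txt")

asyncio.run(main())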
- - :param str deleted_path_name: - Specifies the name of the deleted container to restore. - :param str deletion_id: - Specifies the version of the deleted container to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.file.datalake.aio.DataLakeDirectoryClient - or azure.storage.file.datalake.aio.DataLakeFileClient - """ - _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id) - - pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - path_client = AzureDataLakeStorageRESTAPI( - url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline) - try: - is_file = await path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs) - if is_file: - return self.get_file_client(deleted_path_name) - return self.get_directory_client(deleted_path_name) - except HttpResponseError as error: - process_storage_error(error) - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.get('name') - except AttributeError: - directory_name = str(directory) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, - loop=self._loop - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. 
- :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file_path.get('name') - except AttributeError: - file_path = str(file_path) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) - - @distributed_trace - def list_deleted_paths(self, **kwargs): - # type: (Any) -> AsyncItemPaged[DeletedPathProperties] - """Returns a generator to list the deleted (file or directory) paths under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword str path_prefix: - Filters the results to return only paths under the specified path. - :keyword int results_per_page: - An optional value that specifies the maximum number of items to return per page. - If omitted or greater than 5,000, the response will include up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of DeletedPathProperties. - :rtype: - ~azure.core.paging.AsyncItemPaged[~azure.storage.filedatalake.DeletedPathProperties] - """ - path_prefix = kwargs.pop('path_prefix', None) - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment, - showonly=ListBlobsIncludeItem.deleted, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged, - results_per_page=results_per_page, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_list_paths_helper.py deleted file mode 100644 index 03831a5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_list_paths_helper.py +++ /dev/null @@ -1,111 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
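With a delete retention policy in place, list_deleted_paths pages through soft-deleted entries the same way get_paths does; a sketch with placeholder names, assuming DeletedPathProperties exposes name and deletion_id:

import asyncio

from azure.storage.filedatalake.aio import FileSystemClient

ACCOUNT_URL = "https://<account>.dfs.core.windows.net"   # placeholder
ACCOUNT_KEY = "<account-key>"                             # placeholder

async def main():
    async with FileSystemClient(ACCOUNT_URL, "my-filesystem", credential=ACCOUNT_KEY) as fs:
        # Only paths under the given prefix are returned; pages are fetched lazily.
        async for deleted in fs.list_deleted_paths(path_prefix="raw/"):
            print(deleted.name, deleted.deletion_id)

asyncio.run(main())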
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from azure.core.exceptions import HttpResponseError -from azure.core.async_paging import AsyncPageIterator - -from .._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code -from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix - -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized - - -class DeletedPathPropertiesPaged(AsyncPageIterator): - """An Iterable of deleted path properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A path name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties) - :ivar str container: The container that the paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(DeletedPathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - max_results=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobItemInternal): - file_props = get_deleted_path_properties_from_generated_code(item) - file_props.file_system = self.container - return file_props - if isinstance(item, GenBlobPrefix): - return DirectoryPrefix( - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class 
DirectoryPrefix(DictMixin): - """Directory prefix. - - :ivar str name: Name of the deleted directory. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar str file_system: The file system that the deleted paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.file_system = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_models.py deleted file mode 100644 index 16fe23c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_models.py +++ /dev/null @@ -1,41 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from azure.multiapi.storagev2.blob.v2020_06_12.aio._models import ContainerPropertiesPaged -from .._models import FileSystemProperties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_path_client_async.py deleted file mode 100644 index 8a5ad46..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_path_client_async.py +++ /dev/null @@ -1,732 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from datetime import datetime -from typing import Any, Dict, Union - -from azure.core.exceptions import AzureError, HttpResponseError -from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient -from .._serialize import get_api_version -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._path_client import PathClient as PathClientBase -from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \ - AccessControlChangeCounters, AccessControlChanges -from .._generated.aio import AzureDataLakeStorageRESTAPI -from ._data_lake_lease_async import DataLakeLeaseClient -from .._deserialize import process_storage_error -from .._shared.policies_async import ExponentialRetry - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(AsyncStorageAccountHostsMixin, PathClientBase): - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - - super(PathClient, self).__init__(account_url, # pylint: disable=specify-parameter-names-in-call - file_system_name, path_name, - credential=credential, - **kwargs) # type: ignore - - kwargs.pop('_hosts', None) - - self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=file_system_name, - blob_name=path_name, - credential=credential, - _hosts=self._blob_client._hosts, # pylint: disable=protected-access - **kwargs) - - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._blob_client.url, - file_system=file_system_name, - path=path_name, - pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._blob_client.close() - await super(PathClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_client.close() - await self.__aexit__() - - async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def _delete(self, **kwargs): - # type: (**Any) -> Dict[Union[datetime, str]] - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return await self._client.path.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). 
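set_access_control and get_access_control act on a single path, and permissions and acl are mutually exclusive as noted above; a hedged sketch against a directory client with placeholder names:

import asyncio

from azure.storage.filedatalake.aio import DataLakeDirectoryClient

ACCOUNT_URL = "https://<account>.dfs.core.windows.net"   # placeholder
ACCOUNT_KEY = "<account-key>"                             # placeholder

async def main():
    async with DataLakeDirectoryClient(ACCOUNT_URL, "my-filesystem", "raw/2021",
                                       credential=ACCOUNT_KEY) as directory:
        # Grant the owning group read+execute via an explicit ACL string.
        await directory.set_access_control(acl="user::rwx,group::r-x,other::---")

        # Read it back, resolving object IDs to user principal names where possible.
        acl_props = await directory.get_access_control(upn=True)
        print(acl_props["acl"], acl_props["owner"])

asyncio.run(main())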
- """ - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return await self._client.path.set_access_control(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Get the owner, group, permissions, or access control list for a path. - - :param upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. - """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return await self._client.path.get_properties(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def set_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Sets the Access Control on a path and sub-paths. - - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. 
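# A hedged sketch of the set_access_control / get_access_control calls documented above.
# It assumes an already-constructed asynchronous DataLakeDirectoryClient named
# `directory_client`; the permission string and the response keys printed are illustrative.
import asyncio

async def show_access_control(directory_client):
    # permissions and acl are mutually exclusive; symbolic permissions are used here
    await directory_client.set_access_control(permissions="rwxr-x---")
    # upn=True asks the service to translate AAD object IDs to User Principal Names
    access = await directory_client.get_access_control(upn=True)
    print(access.get("owner"), access.get("permissions"), access.get("acl"))

# asyncio.run(show_access_control(directory_client))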
If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def update_access_control_recursive(self, acl, **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Modifies the Access Control on a path and sub-paths. - - :param acl: - Modifies POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single, - change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. 
- Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def remove_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Removes the Access Control on a path and sub-paths. - - :param acl: - Removes POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, and a user or - group identifier in the format "[scope:][type]:[id]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. 
- """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def _set_access_control_internal(self, options, progress_hook, max_batches=None): - try: - continue_on_failure = options.get('force_flag') - total_directories_successful = 0 - total_files_success = 0 - total_failure_count = 0 - batch_count = 0 - last_continuation_token = None - current_continuation_token = None - continue_operation = True - while continue_operation: - headers, resp = await self._client.path.set_access_control_recursive(**options) - - # make a running tally so that we can report the final results - total_directories_successful += resp.directories_successful - total_files_success += resp.files_successful - total_failure_count += resp.failure_count - batch_count += 1 - current_continuation_token = headers['continuation'] - - if current_continuation_token is not None: - last_continuation_token = current_continuation_token - - if progress_hook is not None: - await progress_hook(AccessControlChanges( - batch_counters=AccessControlChangeCounters( - directories_successful=resp.directories_successful, - files_successful=resp.files_successful, - failure_count=resp.failure_count, - ), - aggregate_counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count, - ), - batch_failures=[AccessControlChangeFailure( - name=failure.name, - is_directory=failure.type == 'DIRECTORY', - error_message=failure.error_message) for failure in resp.failed_entries], - continuation=last_continuation_token)) - - # update the continuation token, if there are more operations that cannot be completed in a single call - max_batches_satisfied = (max_batches is not None and batch_count == max_batches) - continue_operation = bool(current_continuation_token) and not max_batches_satisfied - options['continuation'] = current_continuation_token - - # currently the service stops on any failure, so we should send back the last continuation token - # for the user to retry the failed updates - # otherwise we should just return what the service gave us - return AccessControlChangeResult(counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count), - continuation=last_continuation_token - if total_failure_count > 0 and not continue_on_failure else current_continuation_token) - except HttpResponseError as error: - error.continuation_token = last_continuation_token - process_storage_error(error) - except AzureError as error: - error.continuation_token = last_continuation_token - raise error - - async def _rename_path(self, rename_source, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. 
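# An illustrative sketch of the recursive ACL APIs above, showing the progress_hook
# callback and resuming from AzureError.continuation_token. It assumes an asynchronous
# DataLakeDirectoryClient named `directory_client`; the ACL string and batch size are examples.
from azure.core.exceptions import AzureError

async def change_acl_recursively(directory_client):
    failed_paths = []

    async def progress(changes):
        # AccessControlChanges carries per-batch and aggregate counters plus any failures
        failed_paths.extend(failure.name for failure in changes.batch_failures)
        print("directories:", changes.aggregate_counters.directories_successful,
              "files:", changes.aggregate_counters.files_successful)

    acl = "user::rwx,group::r-x,other::---"
    try:
        result = await directory_client.set_access_control_recursive(
            acl=acl, progress_hook=progress, batch_size=2000, continue_on_failure=True)
    except AzureError as error:
        # restart from where the service stopped, if a continuation token was attached
        token = getattr(error, "continuation_token", None)
        if not token:
            raise
        result = await directory_client.set_access_control_recursive(
            acl=acl, continuation_token=token)
    print(result.counters.failure_count, "failures:", failed_paths)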
If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return await self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - """ - path_properties = await self._blob_client.get_blob_properties(**kwargs) - return path_properties - - async def _exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a path exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._blob_client.exists(**kwargs) - - async def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). 
- """ - return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - async def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - async def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
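# A hedged sketch of acquiring a lease on a path and using it for a guarded operation,
# per the acquire_lease documentation above. Assumes an asynchronous DataLakeFileClient
# or DataLakeDirectoryClient named `path_client`; releasing the lease explicitly is an
# assumption about the returned DataLakeLeaseClient rather than something shown here.
async def update_under_lease(path_client):
    lease = await path_client.acquire_lease(lease_duration=15)   # 15-60s, or -1 for infinite
    try:
        # operations on a leased path must present the lease
        await path_client.set_metadata({"category": "test"}, lease=lease)
    finally:
        await lease.release()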
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_file_system_samples.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file_system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_upload_helper.py deleted file mode 100644 index 00d5bf1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_upload_helper.py +++ /dev/null @@ -1,103 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from azure.core.exceptions import HttpResponseError -from .._deserialize import ( - process_storage_error) -from .._shared.response_handlers import return_response_headers -from .._shared.uploads_async import ( - upload_data_chunks, - DataLakeFileChunkUploader, upload_substream_blocks) - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -async def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - file_settings=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = await client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this 
modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - use_original_upload_path = file_settings.use_byte_buffer or \ - validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - await upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - else: - await upload_substream_blocks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - return await client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - close=True, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/py.typed b/azure/multiapi/storagev2/filedatalake/v2020_06_12/py.typed deleted file mode 100644 index e69de29..0000000 diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/__init__.py deleted file mode 100644 index 99d2ef7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/__init__.py +++ /dev/null @@ -1,105 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
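# A minimal restatement (not the helper itself) of the precondition handling performed by
# upload_datalake_file above when overwrite is False: without explicit access conditions,
# if_none_match='*' is injected so creating over an existing file fails, and
# metadata/umask/permissions are rejected unless overwrite is enabled. A plain dict stands
# in for the SDK's ModifiedAccessConditions model purely for illustration.
def check_upload_preconditions(overwrite, modified_access_conditions,
                               properties=None, umask=None, permissions=None):
    if not overwrite:
        if not any(modified_access_conditions.get(key) for key in (
                "if_modified_since", "if_unmodified_since", "if_match", "if_none_match")):
            modified_access_conditions["if_none_match"] = "*"
        if properties or umask or permissions:
            raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
    return modified_access_conditions

print(check_upload_preconditions(False, {}))  # {'if_none_match': '*'}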
-# -------------------------------------------------------------------------- - -from ._download import StorageStreamDownloader -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._file_system_client import FileSystemClient -from ._data_lake_service_client import DataLakeServiceClient -from ._data_lake_lease import DataLakeLeaseClient -from ._models import ( - LocationMode, - ResourceTypes, - FileSystemProperties, - FileSystemPropertiesPaged, - DirectoryProperties, - FileProperties, - PathProperties, - LeaseProperties, - ContentSettings, - AccountSasPermissions, - FileSystemSasPermissions, - DirectorySasPermissions, - FileSasPermissions, - UserDelegationKey, - PublicAccess, - AccessPolicy, - DelimitedTextDialect, - DelimitedJsonDialect, - ArrowDialect, - ArrowType, - QuickQueryDialect, - DataLakeFileQueryError, - AccessControlChangeResult, - AccessControlChangeCounters, - AccessControlChangeFailure, - AccessControlChanges, - AnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - DeletedPathProperties -) - -from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \ - generate_file_sas - -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import StorageErrorCode -from ._version import VERSION - -__version__ = VERSION - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeFileClient', - 'DataLakeDirectoryClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'PublicAccess', - 'AccessPolicy', - 'ResourceTypes', - 'StorageErrorCode', - 'UserDelegationKey', - 'FileSystemProperties', - 'FileSystemPropertiesPaged', - 'DirectoryProperties', - 'FileProperties', - 'PathProperties', - 'LeaseProperties', - 'ContentSettings', - 'AccessControlChangeResult', - 'AccessControlChangeCounters', - 'AccessControlChangeFailure', - 'AccessControlChanges', - 'AccountSasPermissions', - 'FileSystemSasPermissions', - 'DirectorySasPermissions', - 'FileSasPermissions', - 'generate_account_sas', - 'generate_file_system_sas', - 'generate_directory_sas', - 'generate_file_sas', - 'VERSION', - 'StorageStreamDownloader', - 'DelimitedTextDialect', - 'DelimitedJsonDialect', - 'DataLakeFileQueryError', - 'ArrowDialect', - 'ArrowType', - 'QuickQueryDialect', - 'DataLakeFileQueryError', - 'AnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'DeletedPathProperties' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_directory_client.py deleted file mode 100644 index 49aac08..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_directory_client.py +++ /dev/null @@ -1,568 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
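# An illustrative look at how a consumer imports the vendored, versioned filedatalake
# namespace whose __init__ is removed above. Because v2020_10_02 is dropped by this change,
# the sketch pins a retained version instead (v2021_08_06 is assumed from the changelog);
# the account URL and credential are placeholders.
from azure.multiapi.storagev2.filedatalake.v2021_08_06 import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<sas-token-or-account-key>",
)
file_system = service.get_file_system_client("my-filesystem")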
-# -------------------------------------------------------------------------- -from typing import Any, TypeVar - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore -from azure.core.pipeline import Pipeline -from ._deserialize import deserialize_dir_properties -from ._shared.base_client import TransportWrapper, parse_connection_str -from ._data_lake_file_client import DataLakeFileClient -from ._models import DirectoryProperties, FileProperties -from ._path_client import PathClient - -ClassType = TypeVar("ClassType") - - -class DataLakeDirectoryClient(PathClient): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """ - Create DataLakeDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: - The name of file system to interact with. - :type file_system_name: str - :param directory_name: - The name of directory to interact with. The directory is under file system. - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. 
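# A hedged construction sketch for the DataLakeDirectoryClient defined in the module
# removed above; URL, file system and directory names are placeholders, and the credential
# could equally be an account key, an AzureSasCredential, or an azure.identity credential.
from azure.multiapi.storagev2.filedatalake.v2020_10_02 import DataLakeDirectoryClient

directory_client = DataLakeDirectoryClient(
    account_url="https://<account>.dfs.core.windows.net",
    file_system_name="my-filesystem",
    directory_name="raw/2024",
    credential="<sas-token>",
)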
The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, and account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeDirectoryClient - :rtype ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, directory_name=directory_name, - credential=credential, **kwargs) - - def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return self._create('directory', metadata=metadata, **kwargs) - - def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return self._delete(recursive=True, **kwargs) - - def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
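# A hedged end-to-end sketch of from_connection_string, create_directory and
# delete_directory as documented above; the connection string, names, umask and
# permissions values are placeholders.
import os
from azure.multiapi.storagev2.filedatalake.v2020_10_02 import DataLakeDirectoryClient

conn_str = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
dir_client = DataLakeDirectoryClient.from_connection_string(
    conn_str, file_system_name="my-filesystem", directory_name="raw/2024")

dir_client.create_directory(
    metadata={"category": "test"},
    umask="0027",          # only meaningful on HNS-enabled accounts
    permissions="0770",
)
# ... populate the directory ...
dir_client.delete_directory()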
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - return self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._exists(**kwargs) - - def rename_directory(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). 
Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = DataLakeDirectoryClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, - credential=self._raw_credential or new_dir_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - new_directory_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_directory_client - - def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. 
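# An illustrative rename continuing the sketch above (a DataLakeDirectoryClient named
# `dir_client` is assumed). The new name follows the "{filesystem}/{directory}" format
# rename_directory requires, and a new client bound to the renamed path is returned.
renamed_client = dir_client.rename_directory("my-filesystem/processed/2024")
props = renamed_client.get_directory_properties()
print(props.name, props.last_modified)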
Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.delete_directory(**kwargs) - return subdir - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. 
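# A hedged sketch of creating a file and a subdirectory under an existing directory
# client, as documented above; names are illustrative and `dir_client` is assumed from
# the earlier sketches.
file_client = dir_client.create_file("report.csv")
subdir_client = dir_client.create_sub_directory("archive", metadata={"tier": "cold"})
# ... later ...
dir_client.delete_sub_directory("archive")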
This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - """ - try: - file_path = file.get('name') - except AttributeError: - file_path = self.path_name + '/' + str(file) - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The sub subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - try: - subdir_path = sub_directory.get('name') - except AttributeError: - subdir_path = self.path_name + '/' + str(sub_directory) - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_file_client.py deleted file mode 100644 index e0a14fa..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_file_client.py +++ /dev/null @@ -1,784 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -from io import BytesIO -from typing import Any, TypeVar - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core.exceptions import HttpResponseError -from ._quick_query_helper import DataLakeFileQueryReader -from ._shared.base_client import parse_connection_str -from ._shared.request_handlers import get_length, read_length -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import IterStreamer -from ._upload_helper import upload_datalake_file -from ._download import StorageStreamDownloader -from ._path_client import PathClient -from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \ - convert_datetime_to_rfc1123 -from ._deserialize import process_storage_error, deserialize_file_properties -from ._models import FileProperties, DataLakeFileQueryError - -ClassType = TypeVar("ClassType") - - -class DataLakeFileClient(PathClient): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """ - Create DataLakeFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param directory_name: The name of directory to interact with. The directory is under file system. 
- :type directory_name: str - :param file_name: The name of file to interact with. The file is under directory. - :type file_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeFileClient - :rtype ~azure.storage.filedatalake.DataLakeFileClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, file_path=file_path, - credential=credential, **kwargs) - - def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return self._delete(**kwargs) - - def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access - - def set_file_expiry(self, expiry_options, # type: str - expires_on=None, # type: Optional[Union[datetime, int]] - **kwargs): - # type: (...) -> None - """Sets the time a file will expire and be deleted. - - :param str expiry_options: - Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' - :param datetime or int expires_on: - The time to set the file to expiry. - When expiry_options is RelativeTo*, expires_on should be an int in milliseconds. - If the type of expires_on is datetime, it should be in UTC time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - expires_on = convert_datetime_to_rfc1123(expires_on) - except AttributeError: - expires_on = str(expires_on) - self._datalake_client_for_blob_operation.path \ - .set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access - - def _upload_options( # pylint:disable=too-many-statements - self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - - kwargs['properties'] = add_metadata_headers(metadata) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_mod_conditions(kwargs) - - if content_settings: - kwargs['path_http_headers'] = get_path_http_headers(content_settings) - - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['validate_content'] = validate_content - kwargs['max_concurrency'] = max_concurrency - kwargs['client'] = self._client.path - kwargs['file_settings'] = self._config - - return kwargs - - def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. 
- :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). 
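A minimal sketch of the one-shot ``upload_data`` call documented above, assuming a placeholder connection string, file system and path, and using the public ``azure.storage.filedatalake`` import path (the vendored copies live under ``azure.multiapi.storagev2.filedatalake.<api_version>``)::

    from azure.storage.filedatalake import DataLakeServiceClient

    # Placeholder connection string and names -- substitute real values.
    service = DataLakeServiceClient.from_connection_string("<connection-string>")
    file_system = service.get_file_system_client("my-filesystem")
    file_client = file_system.get_file_client("data/report.csv")

    # upload_data creates (or overwrites) the file and writes the whole payload in one call.
    file_client.upload_data(b"id,value\n1,42\n", overwrite=True)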
- """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return upload_datalake_file(**options) - - @staticmethod - def _append_data_options(data, offset, length=None, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'body': data, - 'position': offset, - 'content_length': length, - 'lease_access_conditions': access_conditions, - 'validate_content': kwargs.pop('validate_content', False), - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data, - offset, - length=length, - **kwargs) - try: - return self._client.path.append_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs): - # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'position': offset, - 'content_length': 0, - 'path_http_headers': path_http_headers, - 'retain_uncommitted_data': retain_uncommitted_data, - 'close': kwargs.pop('close', False), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. 
- - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 8 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return self._client.path.flush_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. 
Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._exists(**kwargs) - - def rename_file(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. 
- :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. 
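To make the ``new_name`` format described above concrete, a minimal sketch with placeholder account details (again via the public ``azure.storage.filedatalake`` import path)::

    from azure.storage.filedatalake import DataLakeFileClient

    file_client = DataLakeFileClient(
        account_url="https://<account>.dfs.core.windows.net",
        file_system_name="my-filesystem",
        file_path="raw/2021/data.csv",
        credential="<account-key-or-sas>")

    # new_name starts with the target file system, followed by the full new path.
    renamed_client = file_client.rename_file("my-filesystem/curated/2021/data.csv")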
- """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_file_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_file_sas = self._query_str.strip('?') - - new_file_client = DataLakeFileClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, - credential=self._raw_credential or new_file_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - new_file_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_file_client - - def query_file(self, query_expression, **kwargs): - # type: (str, **Any) -> DataLakeFileQueryReader - """ - Enables users to select/project on datalake file data by providing simple query expressions. - This operations returns a DataLakeFileQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - eg. Select * from DataLakeStorage - :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword file_format: - Optional. Defines the serialization of the data currently stored in the file. The default is to - treat the file data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum). - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string. - :paramtype file_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect or - ~azure.storage.filedatalake.QuickQueryDialect or str - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the file. By providing an output format, - the file data will be reformatted according to that profile. - This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect. - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string. - :paramtype output_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect - or list[~azure.storage.filedatalake.ArrowDialect] or ~azure.storage.filedatalake.QuickQueryDialect or str - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (DataLakeFileQueryReader) - :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on datalake file data by providing simple query expressions. - """ - query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage") - blob_quick_query_reader = self._blob_client.query_blob(query_expression, - blob_format=kwargs.pop('file_format', None), - error_cls=DataLakeFileQueryError, - **kwargs) - return DataLakeFileQueryReader(blob_quick_query_reader) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_lease.py deleted file mode 100644 index edbe162..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_lease.py +++ /dev/null @@ -1,245 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2020_10_02 import BlobLeaseClient - - -if TYPE_CHECKING: - from datetime import datetime - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(object): - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. 
- :type client: ~azure.storage.filedatalake.FileSystemClient or - ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. 
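A minimal sketch of how this lease client is typically driven, assuming placeholder account details; the context manager releases the lease on exit, and the lease object can be passed to write operations on the locked file::

    from azure.storage.filedatalake import DataLakeFileClient, DataLakeLeaseClient

    file_client = DataLakeFileClient(
        "https://<account>.dfs.core.windows.net", "my-filesystem", "locked/data.csv",
        credential="<account-key-or-sas>")

    # __exit__ releases the lease, so the acquire/release pair is scoped to the block.
    with DataLakeLeaseClient(file_client) as lease:
        lease.acquire(lease_duration=15)
        # Pass the lease so writes against the locked file succeed.
        file_client.upload_data(b"new contents", overwrite=True, lease=lease)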
- - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
- :rtype: int - """ - self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) - - def _update_lease_client_attributes(self): - self.id = self._blob_lease_client.id # type: str - self.last_modified = self._blob_lease_client.last_modified # type: datetime - self.etag = self._blob_lease_client.etag # type: str diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_service_client.py deleted file mode 100644 index b5d32ff..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_data_lake_service_client.py +++ /dev/null @@ -1,566 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Optional, Dict, Any, TypeVar - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline - -from azure.multiapi.storagev2.blob.v2020_10_02 import BlobServiceClient -from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str -from ._deserialize import get_datalake_service_properties -from ._file_system_client import FileSystemClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_file_client import DataLakeFileClient -from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode -from ._serialize import convert_dfs_url_to_blob_url, get_api_version -from ._generated import AzureDataLakeStorageRESTAPI - -ClassType = TypeVar("ClassType") - - -class DataLakeServiceClient(StorageAccountHostsMixin): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - - _, sas_token = parse_query(parsed_url.query) - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs', - credential=self._raw_credential, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - - self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) #pylint: disable=protected-access - - def __enter__(self): - self._blob_service_client.__enter__() - return self - - def __exit__(self, *args): - self._blob_service_client.close() - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._blob_service_client.close() - - def _format_url(self, hostname): - """Format the endpoint URL according to hostname - """ - formated_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str) - return formated_url - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """ - Create DataLakeServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeServiceClient - :rtype ~azure.storage.filedatalake.DataLakeServiceClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_data_lake_service_client_from_conn_str] - :end-before: [END create_data_lake_service_client_from_conn_str] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from a connection string. - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls(account_url, credential=credential, **kwargs) - - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. - """ - delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool include_deleted: - Specifies that deleted file systems to be returned in the response. This is for file system restore enabled - account. The default value is `False`. - .. versionadded:: 12.3.0 - :keyword bool include_system: - Flag specifying that system filesystems should be included. - .. versionadded:: 12.6.0 - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. 
- """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - def _rename_file_system(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str name: - The name of the filesystem to rename. - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = self.get_file_system_client(new_name) - return renamed_file_system - - def undelete_file_system(self, name, deleted_version, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Restores soft-deleted filesystem. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.3.0 - This operation was introduced in API version '2019-12-12'. - - :param str name: - Specifies the name of the deleted filesystem to restore. - :param str deleted_version: - Specifies the version of the deleted filesystem to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - new_name = kwargs.pop('new_name', None) - file_system = self.get_file_system_client(new_name or name) - self._blob_service_client.undelete_container( - name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access - return file_system - - def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, - _pipeline=_pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. 
This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def set_service_properties(self, **kwargs): - # type: (**Any) -> None - """Sets the properties of a storage account's Datalake service, including - Azure Storage Analytics. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :keyword analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging - :keyword hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates. - :type hour_metrics: ~azure.storage.filedatalake.Metrics - :keyword minute_metrics: - The minute metrics settings provide request statistics - for each minute. - :type minute_metrics: ~azure.storage.filedatalake.Metrics - :keyword cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.filedatalake.CorsRule] - :keyword str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :keyword delete_retention_policy: - The delete retention policy specifies whether to retain deleted files/directories. - It also specifies the number of days and versions of file/directory to keep. - :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy - :keyword static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.filedatalake.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - return self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access - - def get_service_properties(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Gets the properties of a storage account's datalake service, including - Azure Storage Analytics. - - .. 
versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing datalake service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - """ - props = self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access - return get_datalake_service_properties(props) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_deserialize.py deleted file mode 100644 index 0a7c688..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_deserialize.py +++ /dev/null @@ -1,216 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -from typing import ( # pylint: disable=unused-import - TYPE_CHECKING -) -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \ - ResourceNotFoundError, ResourceExistsError -from ._models import FileProperties, DirectoryProperties, LeaseProperties, DeletedPathProperties, StaticWebsite, \ - RetentionPolicy, Metrics, AnalyticsLogging, PathProperties # pylint: disable=protected-access -from ._shared.models import StorageErrorCode - -if TYPE_CHECKING: - pass - -_LOGGER = logging.getLogger(__name__) - - -def deserialize_dir_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - dir_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return dir_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_path_properties(path_list): - return [PathProperties._from_generated(path) for path in path_list] # pylint: disable=protected-access - - -def return_headers_and_deserialized_path_list(response, deserialized, response_headers): # pylint: disable=unused-argument - return deserialized.paths if deserialized.paths else {}, normalize_headers(response_headers) - - -def get_deleted_path_properties_from_generated_code(generated): - deleted_path = DeletedPathProperties() - deleted_path.name = generated.name - deleted_path.deleted_time = generated.properties.deleted_time - deleted_path.remaining_retention_days = generated.properties.remaining_retention_days - deleted_path.deletion_id = generated.deletion_id - return deleted_path - - -def is_file_path(_, __, headers): - if headers['x-ms-resource-type'] == "file": - return True - return False - - -def get_datalake_service_properties(datalake_properties): - datalake_properties["analytics_logging"] = AnalyticsLogging._from_generated( # pylint: disable=protected-access - datalake_properties["analytics_logging"]) - datalake_properties["hour_metrics"] = 
Metrics._from_generated(datalake_properties["hour_metrics"])  # pylint: disable=protected-access
-    datalake_properties["minute_metrics"] = Metrics._from_generated(  # pylint: disable=protected-access
-        datalake_properties["minute_metrics"])
-    datalake_properties["delete_retention_policy"] = RetentionPolicy._from_generated(  # pylint: disable=protected-access
-        datalake_properties["delete_retention_policy"])
-    datalake_properties["static_website"] = StaticWebsite._from_generated(  # pylint: disable=protected-access
-        datalake_properties["static_website"])
-    return datalake_properties
-
-
-def from_blob_properties(blob_properties):
-    file_props = FileProperties()
-    file_props.name = blob_properties.name
-    file_props.etag = blob_properties.etag
-    file_props.deleted = blob_properties.deleted
-    file_props.metadata = blob_properties.metadata
-    file_props.lease = blob_properties.lease
-    file_props.lease.__class__ = LeaseProperties
-    file_props.last_modified = blob_properties.last_modified
-    file_props.creation_time = blob_properties.creation_time
-    file_props.size = blob_properties.size
-    file_props.deleted_time = blob_properties.deleted_time
-    file_props.remaining_retention_days = blob_properties.remaining_retention_days
-    file_props.content_settings = blob_properties.content_settings
-    return file_props
-
-
-def normalize_headers(headers):
-    normalized = {}
-    for key, value in headers.items():
-        if key.startswith('x-ms-'):
-            key = key[5:]
-        normalized[key.lower().replace('-', '_')] = value
-    return normalized
-
-
-def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
-    try:
-        raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")}
-    except AttributeError:
-        raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")}
-    return {k[10:]: v for k, v in raw_metadata.items()}
-
-
-def process_storage_error(storage_error): # pylint:disable=too-many-statements
-    raise_error = HttpResponseError
-    serialized = False
-    if not storage_error.response:
-        raise storage_error
-    # If it is one of those three then it has been serialized prior by the generated layer.
-    if isinstance(storage_error, (ResourceNotFoundError, ClientAuthenticationError, ResourceExistsError)):
-        serialized = True
-    error_code = storage_error.response.headers.get('x-ms-error-code')
-    error_message = storage_error.message
-    additional_data = {}
-    error_dict = {}
-    try:
-        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
-        # If it is an XML response
-        if isinstance(error_body, Element):
-            error_dict = {
-                child.tag.lower(): child.text
-                for child in error_body
-            }
-        # If it is a JSON response
-        elif isinstance(error_body, dict):
-            error_dict = error_body.get('error', {})
-        elif not error_code:
-            _LOGGER.warning(
-                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body))
-            error_dict = {'message': str(error_body)}
-
-        # If we extracted from a JSON or XML response
-        if error_dict:
-            error_code = error_dict.get('code')
-            error_message = error_dict.get('message')
-            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
-
-    except DecodeError:
-        pass
-
-    try:
-        # This check would be unnecessary if we have already serialized the error.
- if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.invalid_property_name, - StorageErrorCode.invalid_source_uri, - StorageErrorCode.source_path_not_found, - StorageErrorCode.lease_name_mismatch, - StorageErrorCode.file_system_not_found, - StorageErrorCode.path_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.invalid_destination_path, - StorageErrorCode.invalid_rename_source_path, - StorageErrorCode.lease_is_already_broken, - StorageErrorCode.invalid_source_or_destination_resource_type, - StorageErrorCode.rename_destination_parent_path_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.source_path_is_being_deleted, - StorageErrorCode.path_already_exists, - StorageErrorCode.destination_path_is_being_deleted, - StorageErrorCode.file_system_already_exists, - StorageErrorCode.file_system_being_deleted, - StorageErrorCode.path_conflict]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_download.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_download.py deleted file mode 100644 index 61716d3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_download.py +++ /dev/null @@ -1,59 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Iterator - -from ._deserialize import from_blob_properties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. 
If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the file.
-    """
-
-    def __init__(self, downloader):
-        self._downloader = downloader
-        self.name = self._downloader.name
-        self.properties = from_blob_properties(self._downloader.properties)  # pylint: disable=protected-access
-        self.size = self._downloader.size
-
-    def __len__(self):
-        return self.size
-
-    def chunks(self):
-        # type: () -> Iterator[bytes]
-        """Iterate over chunks in the download stream.
-
-        :rtype: Iterator[bytes]
-        """
-        return self._downloader.chunks()
-
-    def readall(self):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-        :rtype: bytes or str
-        """
-        return self._downloader.readall()
-
-    def readinto(self, stream):
-        """Download the contents of this file to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The number of bytes read.
-        :rtype: int
-        """
-        return self._downloader.readinto(stream)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_file_system_client.py
deleted file mode 100644
index 8bdf4c6..0000000
--- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_file_system_client.py
+++ /dev/null
@@ -1,992 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-import functools
-from typing import Optional, Any, Union, TypeVar
-
-try:
-    from urllib.parse import urlparse, quote, unquote
-except ImportError:
-    from urlparse import urlparse  # type: ignore
-    from urllib2 import quote, unquote  # type: ignore
-import six
-
-from azure.core.pipeline import Pipeline
-from azure.core.exceptions import HttpResponseError
-from azure.core.paging import ItemPaged
-from azure.multiapi.storagev2.blob.v2020_10_02 import ContainerClient
-from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
-from ._serialize import convert_dfs_url_to_blob_url, get_api_version
-from ._list_paths_helper import DeletedPathPropertiesPaged, PathPropertiesPaged
-from ._models import LocationMode, FileSystemProperties, PublicAccess, DeletedPathProperties, FileProperties, \
-    DirectoryProperties
-from ._data_lake_file_client import DataLakeFileClient
-from ._data_lake_directory_client import DataLakeDirectoryClient
-from ._data_lake_lease import DataLakeLeaseClient
-from ._generated import AzureDataLakeStorageRESTAPI
-from ._generated.models import ListBlobsIncludeItem
-from ._deserialize import process_storage_error, is_file_path
-
-
-ClassType = TypeVar("ClassType")
-
-
-class FileSystemClient(StorageAccountHostsMixin):
-    """A client to interact with a specific file system, even if that file system
-    may not yet exist.
-
-    For operations relating to a specific directory or file within this file system, a directory client or file client
-    can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
- - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not file_system_name: - raise ValueError("Please specify a file system name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - # TODO: add self.account_url to base_client and remove _blob_account_url - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._container_client = ContainerClient(blob_account_url, file_system_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url, - file_system=file_system_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return 
"{}://{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - self._query_str) - - def __exit__(self, *args): - self._container_client.close() - super(FileSystemClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._container_client.close() - self.__exit__() - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """ - Create FileSystemClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a FileSystemClient - :rtype ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_connection_string] - :end-before: [END create_file_system_client_from_connection_string] - :language: python - :dedent: 8 - :caption: Create FileSystemClient from connection string - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, credential=credential, **kwargs) - - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 12 - :caption: Creating a file system in the datalake service. - """ - return self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file system exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._container_client.exists(**kwargs) - - def _rename_file_system(self, new_name, **kwargs): - # type: (str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access - #TODO: self._raw_credential would not work with SAS tokens - renamed_file_system = FileSystemClient( - "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - return renamed_file_system - - def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 12 - :caption: Deleting a file system in the datalake service. - """ - self._container_client.delete_container(**kwargs) - - def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the file system. - """ - container_properties = self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the file system. - """ - return self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. 
- :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File System-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, the operation only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_policy = self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> ItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. 
Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 8 - :caption: List the paths in the file system. - """ - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.file_system.list_paths, - path=path, - timeout=timeout, - **kwargs) - return ItemPaged( - command, recursive, path=path, max_results=max_results, - page_iterator_class=PathPropertiesPaged, **kwargs) - - def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.delete_directory(**kwargs) - return directory_client - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. 
- :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 8 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 8 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - file_client.delete_file(**kwargs) - return file_client - - def _undelete_path_options(self, deleted_path_name, deletion_id): - quoted_path = quote(unquote(deleted_path_name.strip('/'))) - - url_and_token = self.url.replace('.dfs.', '.blob.').split('?') - try: - url = url_and_token[0] + '/' + quoted_path + url_and_token[1] - except IndexError: - url = url_and_token[0] + '/' + quoted_path - - undelete_source = quoted_path + '?deletionid={}'.format(deletion_id) if deletion_id else None - - return quoted_path, url, undelete_source - - def _undelete_path(self, deleted_path_name, deletion_id, **kwargs): - # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient] - """Restores soft-deleted path. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :param str deleted_path_name: - Specifies the path (file or directory) to restore. - :param str deletion_id: - Specifies the version of the deleted path to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
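For context, the `_undelete_path_options` helper shown above only rewrites the account endpoint from `.dfs.` to `.blob.` and builds a `deletionid` query string for the restore source. A standalone sketch of the same transformation, using hypothetical account, path and deletion-id values::

    from urllib.parse import quote, unquote

    # Hypothetical inputs, for illustration only.
    file_system_url = "https://myaccount.dfs.core.windows.net/myfilesystem"
    deleted_path_name = "/dir1/file.txt"
    deletion_id = "132578224664283465"

    quoted_path = quote(unquote(deleted_path_name.strip('/')))       # "dir1/file.txt"
    blob_endpoint_url = file_system_url.replace('.dfs.', '.blob.')   # undelete goes through the blob endpoint
    undelete_source = quoted_path + '?deletionid={}'.format(deletion_id) if deletion_id else None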
- :rtype: ~azure.storage.file.datalake.DataLakeDirectoryClient or azure.storage.file.datalake.DataLakeFileClient - """ - _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id) - - pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - path_client = AzureDataLakeStorageRESTAPI( - url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline) - try: - is_file = path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs) - if is_file: - return self.get_file_client(deleted_path_name) - return self.get_directory_client(deleted_path_name) - except HttpResponseError as error: - process_storage_error(error) - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - # TODO: Temporarily removing this for GA release. - # def delete_files(self, *files, **kwargs): - # # type: (...) -> Iterator[HttpResponse] - # """Marks the specified files or empty directories for deletion. - - # The files/empty directories are later deleted during garbage collection. - - # If a delete retention policy is enabled for the service, then this operation soft deletes the - # files/empty directories and retains the files or snapshots for specified number of days. - # After specified number of days, files' data is removed from the service during garbage collection. - # Soft deleted files/empty directories are accessible through :func:`list_deleted_paths()`. - - # :param files: - # The files/empty directories to delete. This can be a single file/empty directory, or multiple values can - # be supplied, where each value is either the name of the file/directory (str) or - # FileProperties/DirectoryProperties. - - # .. note:: - # When the file/dir type is dict, here's a list of keys, value rules. - - # blob name: - # key: 'name', value type: str - # if the file modified or not: - # key: 'if_modified_since', 'if_unmodified_since', value type: datetime - # etag: - # key: 'etag', value type: str - # match the etag or not: - # key: 'match_condition', value type: MatchConditions - # lease: - # key: 'lease_id', value type: Union[str, LeaseClient] - # timeout for subrequest: - # key: 'timeout', value type: int - - # :type files: list[str], list[dict], - # or list[Union[~azure.storage.filedatalake.FileProperties, ~azure.storage.filedatalake.DirectoryProperties] - # :keyword ~datetime.datetime if_modified_since: - # A DateTime value. Azure expects the date value passed in to be UTC. - # If timezone is included, any non-UTC datetimes will be converted to UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - # Specify this header to perform the operation only - # if the resource has been modified since the specified time. - # :keyword ~datetime.datetime if_unmodified_since: - # A DateTime value. Azure expects the date value passed in to be UTC. - # If timezone is included, any non-UTC datetimes will be converted to UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - # Specify this header to perform the operation only if - # the resource has not been modified since the specified date/time. 
- # :keyword bool raise_on_any_failure: - # This is a boolean param which defaults to True. When this is set, an exception - # is raised even if there is a single operation failure. - # :keyword int timeout: - # The timeout parameter is expressed in seconds. - # :return: An iterator of responses, one for each blob in order - # :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - # .. admonition:: Example: - - # .. literalinclude:: ../samples/datalake_samples_file_system_async.py - # :start-after: [START batch_delete_files_or_empty_directories] - # :end-before: [END batch_delete_files_or_empty_directories] - # :language: python - # :dedent: 4 - # :caption: Deleting multiple files or empty directories. - # """ - # return self._container_client.delete_blobs(*files, **kwargs) - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.get('name') - except AttributeError: - directory_name = str(directory) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. 
- """ - try: - file_path = file_path.get('name') - except AttributeError: - file_path = str(file_path) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def list_deleted_paths(self, **kwargs): - # type: (Any) -> ItemPaged[DeletedPathProperties] - """Returns a generator to list the deleted (file or directory) paths under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword str path_prefix: - Filters the results to return only paths under the specified path. - :keyword int results_per_page: - An optional value that specifies the maximum number of items to return per page. - If omitted or greater than 5,000, the response will include up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of DeletedPathProperties. - :rtype: - ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.DeletedPathProperties] - """ - path_prefix = kwargs.pop('path_prefix', None) - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment, - showonly=ListBlobsIncludeItem.deleted, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged, - results_per_page=results_per_page, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/__init__.py deleted file mode 100644 index 5cd3ae2..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI -__all__ = ['AzureDataLakeStorageRESTAPI'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/_azure_data_lake_storage_restapi.py deleted file mode 100644 index 33e26f2..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/_azure_data_lake_storage_restapi.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from ._configuration import AzureDataLakeStorageRESTAPIConfiguration -from .operations import ServiceOperations -from .operations import FileSystemOperations -from .operations import PathOperations -from . import models - - -class AzureDataLakeStorageRESTAPI(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.filedatalake.operations.ServiceOperations - :ivar file_system: FileSystemOperations operations - :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations - :ivar path: PathOperations operations - :vartype path: azure.storage.filedatalake.operations.PathOperations - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - base_url = '{url}' - self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( - self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( - self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, http_request, **kwargs): - # type: (HttpRequest, Any) -> HttpResponse - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. 
- :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.pipeline.transport.HttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureDataLakeStorageRESTAPI - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/_configuration.py deleted file mode 100644 index e461c9f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/_configuration.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureDataLakeStorageRESTAPIConfiguration(Configuration): - """Configuration for AzureDataLakeStorageRESTAPI. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs) - - self.url = url - self.resource = "filesystem" - self.version = "2020-10-02" - kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/__init__.py deleted file mode 100644 index 24daed3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI -__all__ = ['AzureDataLakeStorageRESTAPI'] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/_azure_data_lake_storage_restapi.py deleted file mode 100644 index 438366e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/_azure_data_lake_storage_restapi.py +++ /dev/null @@ -1,81 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest -from msrest import Deserializer, Serializer - -from ._configuration import AzureDataLakeStorageRESTAPIConfiguration -from .operations import ServiceOperations -from .operations import FileSystemOperations -from .operations import PathOperations -from .. import models - - -class AzureDataLakeStorageRESTAPI(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. 
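As the `_configure` methods above show, every pipeline policy of the generated client can be overridden through keyword arguments at construction time, with azure-core defaults used otherwise. A hedged usage sketch (hypothetical account URL; the import path is the vendored v2020_10_02 package that this diff removes, so it is only valid against the previous release)::

    from azure.core.pipeline import policies
    from azure.multiapi.storagev2.filedatalake.v2020_10_02._generated import AzureDataLakeStorageRESTAPI

    client = AzureDataLakeStorageRESTAPI(
        "https://myaccount.dfs.core.windows.net/myfilesystem",           # hypothetical URL
        retry_policy=policies.RetryPolicy(retry_total=3),                # replaces the default RetryPolicy
        logging_policy=policies.NetworkTraceLoggingPolicy(logging_enable=True),
    )
    # client.service / client.file_system / client.path expose the operation groups.
    client.close()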
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations - :ivar file_system: FileSystemOperations operations - :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations - :ivar path: PathOperations operations - :vartype path: azure.storage.filedatalake.aio.operations.PathOperations - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( - self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureDataLakeStorageRESTAPI": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/_configuration.py deleted file mode 100644 index 8faa4a2..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/_configuration.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureDataLakeStorageRESTAPIConfiguration(Configuration): - """Configuration for AzureDataLakeStorageRESTAPI. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs) - - self.url = url - self.resource = "filesystem" - self.version = "2020-10-02" - kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/__init__.py deleted file mode 100644 index 0db71e0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_file_system_operations.py deleted file mode 100644 index 17fe0a0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_file_system_operations.py +++ /dev/null @@ -1,609 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class FileSystemOperations: - """FileSystemOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - properties: Optional[str] = None, - **kwargs: Any - ) -> None: - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the - operation fails. This operation does not support conditional HTTP requests. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. 
- :type properties: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}'} # type: ignore - - async def set_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - properties: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional HTTP requests. For - more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
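In other words, `create` issues ``PUT {url}?resource=filesystem`` with an ``x-ms-version: 2020-10-02`` header and optional base64-encoded values in ``x-ms-properties``, and treats any status other than 201 as an error. A small usage sketch against the async operations shown here (names are hypothetical; ``aio_client`` stands for an already constructed async AzureDataLakeStorageRESTAPI)::

    import base64

    async def create_filesystem_with_properties(aio_client):
        # Property values must be base64-encoded strings per the docstring above.
        encoded = base64.b64encode(b"hello").decode()
        await aio_client.file_system.create(
            properties="key1={}".format(encoded),   # "n1=v1, n2=v2" format
            timeout=30,                             # server-side timeout in seconds
        )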
- :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - async def get_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the response headers. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - async def delete( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same - identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, - attempts to create a filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information indicating that the - filesystem is being deleted. All other operations, including operations on any files or - directories within the filesystem, will fail with status code 404 (Not Found) while the - filesystem is being deleted. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}'} # type: ignore - - async def list_paths( - self, - recursive: bool, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - continuation: Optional[str] = None, - path: Optional[str] = None, - max_results: Optional[int] = None, - upn: Optional[bool] = None, - **kwargs: Any - ) -> "_models.PathList": - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required. 
- :type recursive: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param path: Optional. Filters results to paths within the specified directory. An error - occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PathList, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.PathList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.list_paths.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", 
request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - deserialized = self._deserialize('PathList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_paths.metadata = {'url': '/{filesystem}'} # type: ignore - - async def list_blob_hierarchy_segment( - self, - prefix: Optional[str] = None, - delimiter: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - showonly: Optional[str] = "deleted", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListBlobsHierarchySegmentResponse": - """The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem] - :param showonly: Include this parameter to specify one or more datasets to include in the - response. 
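The continuation token for `list_paths` comes back only in the ``x-ms-continuation`` response header, which the raw operation exposes through the optional ``cls`` callback; the hand-written client wraps this in ``PathPropertiesPaged``, but a manual paging loop over the async operation could look roughly like this (illustrative only, assuming the returned ``PathList`` model exposes a ``paths`` attribute)::

    async def list_all_paths(aio_client, directory=None):
        paths, continuation = [], None
        while True:
            # cls is called with (pipeline_response, deserialized PathList, response headers).
            page, continuation = await aio_client.file_system.list_paths(
                recursive=True,
                path=directory,
                continuation=continuation,
                max_results=1000,
                cls=lambda resp, path_list, headers: (path_list, headers.get('x-ms-continuation')),
            )
            paths.extend(page.paths or [])
            if not continuation:
                return paths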
- :type showonly: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if delimiter is not None: - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if showonly is not None: - query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_path_operations.py deleted file mode 100644 index 3391320..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_path_operations.py +++ /dev/null @@ -1,1784 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class PathOperations: - """PathOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - resource: Optional[Union[str, "_models.PathResourceType"]] = None, - continuation: Optional[str] = None, - mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - rename_source: Optional[str] = None, - source_lease_id: Optional[str] = None, - properties: Optional[str] = None, - permissions: Optional[str] = None, - umask: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Create File | Create Directory | Rename File | Rename Directory. 
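
Editorial aside (not part of the patch): a sketch of delimiter-based listing with the generated ``list_blob_hierarchy_segment`` operation a little above, paging on the marker the docstring describes. ``fs_ops`` is the same assumed operations instance, and the ``next_marker`` attribute name on ``ListBlobsHierarchySegmentResponse`` is an assumption borrowed from the public blob models::

    # Collect listing segments, following the next marker across pages.
    async def list_by_delimiter(fs_ops, prefix=None):
        segments = []
        marker = None
        while True:
            segment = await fs_ops.list_blob_hierarchy_segment(
                prefix=prefix,
                delimiter="/",     # names sharing a "/"-delimited prefix collapse into BlobPrefix entries
                marker=marker,
                max_results=1000,
            )
            segments.append(segment)
            marker = getattr(segment, "next_marker", None)   # assumed attribute name
            if not marker:
                return segments
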
- - Create or rename a file or directory. By default, the destination is overwritten and if the - destination already exists and has a lease the lease is broken. This operation supports - conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob - Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param resource: Required only for Create File and Create Directory. The value must be "file" - or "directory". - :type resource: str or ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This parameter determines the - behavior of the rename operation. The value must be "legacy" or "posix", and the default value - will be "posix". - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. The value must have the - following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties - will overwrite the existing properties; otherwise, the existing properties will be preserved. - This value must be a URL percent-encoded string. Note that the string may only contain ASCII - characters in the ISO-8859-1 character set. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. 
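
Editorial aside (not part of the patch): a sketch of the conditional-create pattern the description above mentions, using ``If-None-Match: "*"`` to fail instead of overwriting an existing destination. ``path_ops`` is an assumed ``PathOperations`` instance for the destination path, and ``_models`` is the module-level alias used throughout this file::

    # Create a directory only if nothing exists at the destination path yet.
    async def create_directory_once(path_ops):
        await path_ops.create(
            resource="directory",          # must be "file" or "directory"
            permissions="rwxr-x---",       # symbolic or 4-digit octal notation
            umask="0027",
            modified_access_conditions=_models.ModifiedAccessConditions(if_none_match="*"),
        )
        # Without the If-None-Match condition, an existing destination is overwritten
        # (and any lease on it is broken), as the operation description notes.
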
- When creating a file or directory and the parent folder does not have a default ACL, the umask - restricts the permissions of the file or directory to be created. The resulting permission is - given by p bitwise and not u, where p is the permission and u is the umask. For example, if p - is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 - for a directory and 0666 for a file. The default umask is 0027. The umask must be specified - in 4-digit octal notation (e.g. 0766). - :type umask: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_encoding = None - _content_language = None - _content_disposition = None - _content_type = None - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - _content_disposition = path_http_headers.content_disposition - _content_type = path_http_headers.content_type - if source_modified_access_conditions is not None: - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if resource is not None: - query_parameters['resource'] = 
self._serialize.query("resource", resource, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, 
header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def update( - self, - action: Union[str, "_models.PathUpdateAction"], - mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], - body: IO, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - max_records: Optional[int] = None, - continuation: Optional[str] = None, - force_flag: Optional[bool] = None, - position: Optional[int] = None, - retain_uncommitted_data: Optional[bool] = None, - close: Optional[bool] = None, - content_length: Optional[int] = None, - properties: Optional[str] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - acl: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> Optional["_models.SetAccessControlRecursiveResponse"]: - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, - sets properties for a file or directory, or sets access control for a file or directory. Data - can only be appended to a file. Concurrent writes to the same file using multiple clients are - not supported. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations - `_. - - :param action: The action must be "append" to upload data to be appended to a file, "flush" to - flush previously uploaded data to a file, "setProperties" to set the properties of a file or - directory, "setAccessControl" to set the owner, group, permissions, or access control list for - a file or directory, or "setAccessControlRecursive" to set the access control list for a - directory recursively. Note that Hierarchical Namespace must be enabled for the account in - order to use access control. Also note that the Access Control List (ACL) includes permissions - for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers - are mutually exclusive. 
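
Editorial aside (not part of the patch): the umask rule quoted in the create docstring above ("the resulting permission is given by p bitwise and not u"), restated as a small worked check using the docstring's own numbers::

    def apply_umask(permission: int, umask: int) -> int:
        # Resulting permission = p & ~u.
        return permission & ~umask

    assert oct(apply_umask(0o777, 0o057)) == '0o720'   # the docstring's example
    assert oct(apply_umask(0o666, 0o027)) == '0o640'   # default file permission with the default umask
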
- :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param body: Initial data. - :type body: IO - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the - maximum number of files or directories on which the acl change will be applied. If omitted or - greater than 2,000, the request will process up to 2,000 items. - :type max_records: int - :param continuation: Optional. The number of paths processed with each invocation is limited. - If the number of paths to be processed exceeds this limit, a continuation token is returned in - the response header x-ms-continuation. When a continuation token is returned in the response, - it must be percent-encoded and specified in a subsequent invocation of - setAccessControlRecursive operation. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. 
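
Editorial aside (not part of the patch): the ``position``/``close`` semantics above are easiest to see through the hand-written file client that wraps this generated ``update`` operation. A minimal sketch, assuming the vendored aio client is importable from this package version (the import path and keyword names are not verified against the patch)::

    from azure.multiapi.storagev2.filedatalake.v2020_10_02.aio import DataLakeFileClient

    async def upload_bytes(file_client: "DataLakeFileClient", data: bytes) -> None:
        await file_client.create_file()
        # Append states the offset at which these bytes land in the file...
        await file_client.append_data(data, offset=0, length=len(data))
        # ...and flush commits everything up to the stated length; close=True marks
        # this as the final change for the change-notification scenario described above.
        await file_client.flush_data(len(data), close=True)
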
This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
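
Editorial aside (not part of the patch): an example of the comma-separated ACL syntax just described, ``[scope:][type]:[id]:[permissions]`` (the object ID below is a placeholder)::

    example_acl = (
        "user::rwx,"                                       # owning user
        "user:11111111-1111-1111-1111-111111111111:r-x,"   # a named user, identified by AAD object ID
        "group::r-x,"                                      # owning group
        "other::---,"                                      # everyone else
        "default:user::rwx"                                # "default:" scope is inherited by new children
    )
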
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/json" - - # Construct URL - url = self.update.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_length is not None: - 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - deserialized = None - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', 
response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if response.status_code == 202: - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def lease( - self, - x_ms_lease_action: Union[str, "_models.PathLeaseAction"], - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - x_ms_lease_duration: Optional[int] = None, - x_ms_lease_break_period: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Lease Path. - - Create and manage a lease to restrict write and delete access to the path. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations - `_. - - :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", - and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" - to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the - lease break period is allowed to elapse, during which time no lease operation except break and - release can be performed on the file. When a lease is successfully broken, the response - indicates the interval in seconds until a new lease can be acquired. Use "change" and specify - the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to - change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. 
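
Editorial aside (not part of the patch): a sketch of acquiring and later releasing a lease with the generated ``lease`` operation described above. ``path_ops`` is an assumed ``PathOperations`` instance, the GUID is generated locally, and ``_models`` is the module-level alias from this file::

    import uuid

    async def hold_lease(path_ops):
        lease_id = str(uuid.uuid4())
        # "acquire" needs a proposed lease ID plus a duration: 15-60 seconds, or -1 for infinite.
        await path_ops.lease("acquire", proposed_lease_id=lease_id, x_ms_lease_duration=-1)
        try:
            pass  # work guarded by the lease goes here
        finally:
            # "release" identifies the active lease via the x-ms-lease-id header.
            await path_ops.lease(
                "release",
                lease_access_conditions=_models.LeaseAccessConditions(lease_id=lease_id),
            )
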
- :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies - the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or - -1 for infinite lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, - and specifies the break period of the lease in seconds. The lease break duration must be - between 0 and 60 seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", 
x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 201: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 202: - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def read( - self, - 
request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - x_ms_range_get_content_md5: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> IO: - """Read File. - - Read the contents of a file. For read operations, range requests are supported. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: The HTTP Range request header specifies one or more byte ranges of the resource - to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified - together with the Range header, the service returns the MD5 hash for the range, as long as the - range is less than or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this header is set to true when - the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
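
Editorial aside (not part of the patch): a ranged read through the generated ``read`` operation above. The 4 MB limit on ``x_ms_range_get_content_md5`` comes from the docstring; ``path_ops`` is an assumed instance, and the streamed response is consumed chunk by chunk::

    async def read_first_mib(path_ops) -> bytes:
        stream = await path_ops.read(
            range="bytes=0-1048575",            # standard HTTP Range syntax for the first 1 MiB
            x_ms_range_get_content_md5=True,    # only honoured for ranges of 4 MB or less
        )
        chunks = []
        async for chunk in stream:              # the downloader yields raw byte chunks
            chunks.append(chunk)
        return b"".join(chunks)
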
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.read.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - 
response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - 
response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def get_properties( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - action: Optional[Union[str, "_models.PathGetPropertiesAction"]] = None, - upn: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a path. Get Status returns - all system defined properties for a path. Get Access Control List returns the access control - list for a path. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param action: Optional. If the value is "getStatus" only the system defined properties for the - path are returned. If the value is "getAccessControl" the access control list is returned in - the response headers (Hierarchical Namespace must be enabled for the account), otherwise the - properties are returned. - :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
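
Editorial aside (not part of the patch): because ``get_properties`` surfaces its results in response headers rather than a body, a ``cls`` callback is the usual way to read them. A sketch, with ``path_ops`` assumed as before::

    async def get_access_control(path_ops):
        headers = await path_ops.get_properties(
            action="getAccessControl",
            upn=True,   # translate owner/group/ACL object IDs to User Principal Names where possible
            cls=lambda resp, deserialized, response_headers: response_headers,
        )
        return headers["x-ms-owner"], headers["x-ms-group"], headers["x-ms-acl"]
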
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'str') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - 
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def delete( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - recursive: Optional[bool] = None, - continuation: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param recursive: Required. - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. 
When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control( - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/json" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_access_control_recursive( - self, - mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], - timeout: Optional[int] = None, - continuation: Optional[str] = None, - force_flag: Optional[bool] = None, - max_records: Optional[int] = None, - acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.SetAccessControlRecursiveResponse": - """Set the access control list for a path and sub-paths. - - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files or directories on which - the acl change will be applied. If omitted or greater than 2,000, the request will process up - to 2,000 items. - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". 
- :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - action = "setAccessControlRecursive" - accept = "application/json" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, 
response_headers) - - return deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def flush_data( - self, - timeout: Optional[int] = None, - position: Optional[int] = None, - retain_uncommitted_data: Optional[bool] = None, - close: Optional[bool] = None, - content_length: Optional[int] = None, - request_id_parameter: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. 
- :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - action = "flush" - accept = "application/json" - - # Construct URL - url = self.flush_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is 
not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def append_data( - self, - body: IO, - position: Optional[int] = None, - timeout: Optional[int] = None, - content_length: Optional[int] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Append data to the file. - - :param body: Initial data. - :type body: IO - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. 
Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _transactional_content_hash = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _transactional_content_hash = path_http_headers.transactional_content_hash - action = "append" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.append_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if request_id_parameter is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def set_expiry( - self, - expiry_options: Union[str, "_models.PathExpiryOptions"], - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - expires_on: Optional[str] = None, - **kwargs: Any - ) -> None: - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/json" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - async def undelete( - self, - timeout: Optional[int] = None, - undelete_source: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """Undelete a path that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of - the soft deleted blob to undelete. 
- :type undelete_source: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/json" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if undelete_source is not None: - header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_service_operations.py deleted file mode 100644 index 4572042..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,149 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
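
The removed ``PathOperations.append_data``/``flush_data`` docstrings above describe a two-step upload protocol: append at an explicit ``position``, then flush with ``content_length`` 0 at the final file length. A minimal illustrative sketch of that sequence, assuming an already-configured ``PathOperations`` instance (the ``path_ops`` argument and ``upload_small_file`` helper are hypothetical names, not part of this package)::

    import io

    async def upload_small_file(path_ops, data: bytes) -> None:
        # Append the whole payload at offset 0; content_length must equal the body size.
        await path_ops.append_data(
            body=io.BytesIO(data),
            position=0,
            content_length=len(data),
        )
        # Flush commits the previously appended data: position equals the final file
        # length, content_length must be 0, and close=True signals the final change
        # event when change notifications are enabled (per the docstrings above).
        await path_ops.flush_data(
            position=len(data),
            content_length=0,
            close=True,
        )
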
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar -import warnings - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def list_file_systems( - self, - prefix: Optional[str] = None, - continuation: Optional[str] = None, - max_results: Optional[int] = None, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs: Any - ) -> AsyncIterable["_models.FileSystemList"]: - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either FileSystemList or the result of cls(response) - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystemList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - resource = "account" - accept = "application/json" - - def prepare_request(next_link=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_file_systems.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - async def extract_data(pipeline_response): - deserialized = self._deserialize('FileSystemList', pipeline_response) - list_of_elem = deserialized.filesystems - if cls: - list_of_elem = cls(list_of_elem) - return None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged( - get_next, extract_data - ) - list_file_systems.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/__init__.py deleted file mode 100644 index fc4548f..0000000 
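
For the service-level pager deleted above, a comparable sketch, assuming a configured ``ServiceOperations`` instance (the ``service_ops`` argument and wrapper function are illustrative only)::

    async def print_file_systems(service_ops, prefix=None) -> None:
        # list_file_systems returns an AsyncItemPaged; iterating it yields the
        # individual FileSystem entries extracted from each FileSystemList page
        # (see extract_data above).
        async for file_system in service_ops.list_file_systems(prefix=prefix, max_results=100):
            print(file_system)
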
--- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/__init__.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AclFailedEntry - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import FileSystem - from ._models_py3 import FileSystemList - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import Path - from ._models_py3 import PathHTTPHeaders - from ._models_py3 import PathList - from ._models_py3 import SetAccessControlRecursiveResponse - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError - from ._models_py3 import StorageErrorError -except (SyntaxError, ImportError): - from ._models import AclFailedEntry # type: ignore - from ._models import BlobHierarchyListSegment # type: ignore - from ._models import BlobItemInternal # type: ignore - from ._models import BlobPrefix # type: ignore - from ._models import BlobPropertiesInternal # type: ignore - from ._models import FileSystem # type: ignore - from ._models import FileSystemList # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListBlobsHierarchySegmentResponse # type: ignore - from ._models import ModifiedAccessConditions # type: ignore - from ._models import Path # type: ignore - from ._models import PathHTTPHeaders # type: ignore - from ._models import PathList # type: ignore - from ._models import SetAccessControlRecursiveResponse # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageErrorError # type: ignore - -from ._azure_data_lake_storage_restapi_enums import ( - ListBlobsIncludeItem, - PathExpiryOptions, - PathGetPropertiesAction, - PathLeaseAction, - PathRenameMode, - PathResourceType, - PathSetAccessControlRecursiveMode, - PathUpdateAction, -) - -__all__ = [ - 'AclFailedEntry', - 'BlobHierarchyListSegment', - 'BlobItemInternal', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'FileSystem', - 'FileSystemList', - 'LeaseAccessConditions', - 'ListBlobsHierarchySegmentResponse', - 'ModifiedAccessConditions', - 'Path', - 'PathHTTPHeaders', - 'PathList', - 'SetAccessControlRecursiveResponse', - 'SourceModifiedAccessConditions', - 'StorageError', - 'StorageErrorError', - 'ListBlobsIncludeItem', - 'PathExpiryOptions', - 'PathGetPropertiesAction', - 'PathLeaseAction', - 'PathRenameMode', - 'PathResourceType', - 'PathSetAccessControlRecursiveMode', - 'PathUpdateAction', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_azure_data_lake_storage_restapi_enums.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_azure_data_lake_storage_restapi_enums.py deleted file mode 100644 index 804050e..0000000 --- 
a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_azure_data_lake_storage_restapi_enums.py +++ /dev/null @@ -1,81 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - COPY = "copy" - DELETED = "deleted" - METADATA = "metadata" - SNAPSHOTS = "snapshots" - UNCOMMITTEDBLOBS = "uncommittedblobs" - VERSIONS = "versions" - TAGS = "tags" - -class PathExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NEVER_EXPIRE = "NeverExpire" - RELATIVE_TO_CREATION = "RelativeToCreation" - RELATIVE_TO_NOW = "RelativeToNow" - ABSOLUTE = "Absolute" - -class PathGetPropertiesAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - GET_ACCESS_CONTROL = "getAccessControl" - GET_STATUS = "getStatus" - -class PathLeaseAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - ACQUIRE = "acquire" - BREAK_ENUM = "break" - CHANGE = "change" - RENEW = "renew" - RELEASE = "release" - -class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - LEGACY = "legacy" - POSIX = "posix" - -class PathResourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - DIRECTORY = "directory" - FILE = "file" - -class PathSetAccessControlRecursiveMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SET = "set" - MODIFY = "modify" - REMOVE = "remove" - -class PathUpdateAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - APPEND = "append" - FLUSH = "flush" - SET_PROPERTIES = "setProperties" - SET_ACCESS_CONTROL = "setAccessControl" - SET_ACCESS_CONTROL_RECURSIVE = "setAccessControlRecursive" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_models.py deleted file mode 100644 index 3814aa7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_models.py +++ /dev/null @@ -1,676 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AclFailedEntry(msrest.serialization.Model): - """AclFailedEntry. - - :param name: - :type name: str - :param type: - :type type: str - :param error_message: - :type error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AclFailedEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) - self.error_message = kwargs.get('error_message', None) - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs['blob_items'] - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. - :type properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal - :param deletion_id: - :type deletion_id: str - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'deletion_id': {'key': 'DeletionId', 'type': 'str'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs['deleted'] - self.snapshot = kwargs['snapshot'] - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs['properties'] - self.deletion_id = kwargs.get('deletion_id', None) - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. 
- :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs['name'] - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param copy_id: - :type copy_id: str - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier_inferred: - :type access_tier_inferred: bool - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. 
- :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - :param delete_time: - :type delete_time: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - 
self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.last_accessed_on = kwargs.get('last_accessed_on', None) - self.delete_time = kwargs.get('delete_time', None) - - -class FileSystem(msrest.serialization.Model): - """FileSystem. - - :param name: - :type name: str - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(FileSystem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - - -class FileSystemList(msrest.serialization.Model): - """FileSystemList. - - :param filesystems: - :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__( - self, - **kwargs - ): - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = kwargs.get('filesystems', None) - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. - :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. 
- :type segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - - -class Path(msrest.serialization.Model): - """Path. - - :param name: - :type name: str - :param is_directory: - :type is_directory: bool - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - :param content_length: - :type content_length: long - :param owner: - :type owner: str - :param group: - :type group: str - :param permissions: - :type permissions: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. 
- :type encryption_scope: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Path, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.is_directory = kwargs.get('is_directory', False) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - self.content_length = kwargs.get('content_length', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - - -class PathHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :type cache_control: str - :param content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type content_encoding: str - :param content_language: Optional. Set the blob's content language. If specified, this property - is stored with the blob and returned with a read request. - :type content_language: str - :param content_disposition: Optional. Sets the blob's Content-Disposition header. - :type content_disposition: str - :param content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :type content_type: str - :param content_md5: Specify the transactional md5 for the body, to be validated by the service. - :type content_md5: bytearray - :param transactional_content_hash: Specify the transactional md5 for the body, to be validated - by the service. - :type transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, - } - - def __init__( - self, - **kwargs - ): - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.content_type = kwargs.get('content_type', None) - self.content_md5 = kwargs.get('content_md5', None) - self.transactional_content_hash = kwargs.get('transactional_content_hash', None) - - -class PathList(msrest.serialization.Model): - """PathList. 
- - :param paths: - :type paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__( - self, - **kwargs - ): - super(PathList, self).__init__(**kwargs) - self.paths = kwargs.get('paths', None) - - -class SetAccessControlRecursiveResponse(msrest.serialization.Model): - """SetAccessControlRecursiveResponse. - - :param directories_successful: - :type directories_successful: int - :param files_successful: - :type files_successful: int - :param failure_count: - :type failure_count: int - :param failed_entries: - :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__( - self, - **kwargs - ): - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = kwargs.get('directories_successful', None) - self.files_successful = kwargs.get('files_successful', None) - self.failure_count = kwargs.get('failure_count', None) - self.failed_entries = kwargs.get('failed_entries', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :type source_if_unmodified_since: ~datetime.datetime - """ - - _attribute_map = { - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param error: The service error response object. - :type error: ~azure.storage.filedatalake.models.StorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorError'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class StorageErrorError(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. 
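The models removed here are thin ``msrest.serialization.Model`` subclasses whose ``_attribute_map`` drives both serialization and deserialization. A rough, self-contained sketch of how such a model round-trips the service's error payload — the class mirrors the deleted ``StorageErrorError``; the sample payload and field values are made up for illustration::

    import msrest.serialization

    class StorageErrorError(msrest.serialization.Model):
        """The service error response object (mirrors the deleted model)."""

        _attribute_map = {
            'code': {'key': 'Code', 'type': 'str'},
            'message': {'key': 'Message', 'type': 'str'},
        }

        def __init__(self, *, code=None, message=None, **kwargs):
            super(StorageErrorError, self).__init__(**kwargs)
            self.code = code
            self.message = message

    # Deserialize a (made-up) error body keyed by the wire names, then dump it back out.
    error = StorageErrorError.deserialize(
        {'Code': 'PathNotFound', 'Message': 'The specified path does not exist.'})
    assert error.code == 'PathNotFound'
    print(error.as_dict())  # keys use the Python attribute names, e.g. {'code': ..., 'message': ...}

The generated operations below use the same machinery via ``failsafe_deserialize(_models.StorageError, response)`` when a request fails.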
- :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_models_py3.py deleted file mode 100644 index df25478..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/models/_models_py3.py +++ /dev/null @@ -1,784 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import List, Optional - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AclFailedEntry(msrest.serialization.Model): - """AclFailedEntry. - - :param name: - :type name: str - :param type: - :type type: str - :param error_message: - :type error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - type: Optional[str] = None, - error_message: Optional[str] = None, - **kwargs - ): - super(AclFailedEntry, self).__init__(**kwargs) - self.name = name - self.type = type - self.error_message = error_message - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. - - All required parameters must be populated in order to send to Azure. - - :param blob_prefixes: - :type blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] - :param blob_items: Required. - :type blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - blob_prefixes: Optional[List["BlobPrefix"]] = None, - **kwargs - ): - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param deleted: Required. - :type deleted: bool - :param snapshot: Required. - :type snapshot: str - :param version_id: - :type version_id: str - :param is_current_version: - :type is_current_version: bool - :param properties: Required. Properties of a blob. 
- :type properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal - :param deletion_id: - :type deletion_id: str - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'deletion_id': {'key': 'DeletionId', 'type': 'str'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - deleted: bool, - snapshot: str, - properties: "BlobPropertiesInternal", - version_id: Optional[str] = None, - is_current_version: Optional[bool] = None, - deletion_id: Optional[str] = None, - **kwargs - ): - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.deletion_id = deletion_id - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param content_length: Size in bytes. - :type content_length: long - :param content_type: - :type content_type: str - :param content_encoding: - :type content_encoding: str - :param content_language: - :type content_language: str - :param content_md5: - :type content_md5: bytearray - :param content_disposition: - :type content_disposition: str - :param cache_control: - :type cache_control: str - :param blob_sequence_number: - :type blob_sequence_number: long - :param copy_id: - :type copy_id: str - :param copy_source: - :type copy_source: str - :param copy_progress: - :type copy_progress: str - :param copy_completion_time: - :type copy_completion_time: ~datetime.datetime - :param copy_status_description: - :type copy_status_description: str - :param server_encrypted: - :type server_encrypted: bool - :param incremental_copy: - :type incremental_copy: bool - :param destination_snapshot: - :type destination_snapshot: str - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier_inferred: - :type access_tier_inferred: bool - :param customer_provided_key_sha256: - :type customer_provided_key_sha256: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. 
- :type encryption_scope: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param tag_count: - :type tag_count: int - :param expires_on: - :type expires_on: ~datetime.datetime - :param is_sealed: - :type is_sealed: bool - :param last_accessed_on: - :type last_accessed_on: ~datetime.datetime - :param delete_time: - :type delete_time: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - creation_time: Optional[datetime.datetime] = None, - content_length: Optional[int] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_md5: Optional[bytearray] = None, - content_disposition: Optional[str] = None, - cache_control: Optional[str] = None, - blob_sequence_number: Optional[int] = None, - copy_id: Optional[str] = None, - copy_source: Optional[str] = None, - copy_progress: Optional[str] = None, - copy_completion_time: Optional[datetime.datetime] = None, - copy_status_description: Optional[str] = None, - server_encrypted: Optional[bool] = None, - incremental_copy: Optional[bool] = None, - destination_snapshot: Optional[str] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier_inferred: Optional[bool] = None, - customer_provided_key_sha256: Optional[str] = None, - 
encryption_scope: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - tag_count: Optional[int] = None, - expires_on: Optional[datetime.datetime] = None, - is_sealed: Optional[bool] = None, - last_accessed_on: Optional[datetime.datetime] = None, - delete_time: Optional[datetime.datetime] = None, - **kwargs - ): - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.copy_id = copy_id - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier_inferred = access_tier_inferred - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.last_accessed_on = last_accessed_on - self.delete_time = delete_time - - -class FileSystem(msrest.serialization.Model): - """FileSystem. - - :param name: - :type name: str - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - last_modified: Optional[str] = None, - e_tag: Optional[str] = None, - **kwargs - ): - super(FileSystem, self).__init__(**kwargs) - self.name = name - self.last_modified = last_modified - self.e_tag = e_tag - - -class FileSystemList(msrest.serialization.Model): - """FileSystemList. - - :param filesystems: - :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__( - self, - *, - filesystems: Optional[List["FileSystem"]] = None, - **kwargs - ): - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = filesystems - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param container_name: Required. 
- :type container_name: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param delimiter: - :type delimiter: str - :param segment: Required. - :type segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment - :param next_marker: - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobHierarchyListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - delimiter: Optional[str] = None, - next_marker: Optional[str] = None, - **kwargs - ): - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :type if_modified_since: ~datetime.datetime - :param if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :type if_unmodified_since: ~datetime.datetime - :param if_match: Specify an ETag value to operate only on blobs with a matching value. - :type if_match: str - :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - } - - def __init__( - self, - *, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs - ): - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - - -class Path(msrest.serialization.Model): - """Path. 
- - :param name: - :type name: str - :param is_directory: - :type is_directory: bool - :param last_modified: - :type last_modified: str - :param e_tag: - :type e_tag: str - :param content_length: - :type content_length: long - :param owner: - :type owner: str - :param group: - :type group: str - :param permissions: - :type permissions: str - :param encryption_scope: The name of the encryption scope under which the blob is encrypted. - :type encryption_scope: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - is_directory: Optional[bool] = False, - last_modified: Optional[str] = None, - e_tag: Optional[str] = None, - content_length: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - encryption_scope: Optional[str] = None, - **kwargs - ): - super(Path, self).__init__(**kwargs) - self.name = name - self.is_directory = is_directory - self.last_modified = last_modified - self.e_tag = e_tag - self.content_length = content_length - self.owner = owner - self.group = group - self.permissions = permissions - self.encryption_scope = encryption_scope - - -class PathHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :type cache_control: str - :param content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :type content_encoding: str - :param content_language: Optional. Set the blob's content language. If specified, this property - is stored with the blob and returned with a read request. - :type content_language: str - :param content_disposition: Optional. Sets the blob's Content-Disposition header. - :type content_disposition: str - :param content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :type content_type: str - :param content_md5: Specify the transactional md5 for the body, to be validated by the service. - :type content_md5: bytearray - :param transactional_content_hash: Specify the transactional md5 for the body, to be validated - by the service. 
- :type transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, - } - - def __init__( - self, - *, - cache_control: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_disposition: Optional[str] = None, - content_type: Optional[str] = None, - content_md5: Optional[bytearray] = None, - transactional_content_hash: Optional[bytearray] = None, - **kwargs - ): - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.content_type = content_type - self.content_md5 = content_md5 - self.transactional_content_hash = transactional_content_hash - - -class PathList(msrest.serialization.Model): - """PathList. - - :param paths: - :type paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__( - self, - *, - paths: Optional[List["Path"]] = None, - **kwargs - ): - super(PathList, self).__init__(**kwargs) - self.paths = paths - - -class SetAccessControlRecursiveResponse(msrest.serialization.Model): - """SetAccessControlRecursiveResponse. - - :param directories_successful: - :type directories_successful: int - :param files_successful: - :type files_successful: int - :param failure_count: - :type failure_count: int - :param failed_entries: - :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__( - self, - *, - directories_successful: Optional[int] = None, - files_successful: Optional[int] = None, - failure_count: Optional[int] = None, - failed_entries: Optional[List["AclFailedEntry"]] = None, - **kwargs - ): - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - self.failed_entries = failed_entries - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :type source_if_match: str - :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :type source_if_none_match: str - :param source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :type source_if_modified_since: ~datetime.datetime - :param source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. 
- :type source_if_unmodified_since: ~datetime.datetime - """ - - _attribute_map = { - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - } - - def __init__( - self, - *, - source_if_match: Optional[str] = None, - source_if_none_match: Optional[str] = None, - source_if_modified_since: Optional[datetime.datetime] = None, - source_if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param error: The service error response object. - :type error: ~azure.storage.filedatalake.models.StorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorError'}, - } - - def __init__( - self, - *, - error: Optional["StorageErrorError"] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.error = error - - -class StorageErrorError(msrest.serialization.Model): - """The service error response object. - - :param code: The service error code. - :type code: str - :param message: The service error message. - :type message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - code: Optional[str] = None, - message: Optional[str] = None, - **kwargs - ): - super(StorageErrorError, self).__init__(**kwargs) - self.code = code - self.message = message diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/__init__.py deleted file mode 100644 index 0db71e0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_file_system_operations.py deleted file mode 100644 index 8f1c7fb..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_file_system_operations.py +++ /dev/null @@ -1,619 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. 
All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class FileSystemOperations(object): - """FileSystemOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - properties=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the - operation fails. This operation does not support conditional HTTP requests. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. 
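As the parameter description above notes, user-defined filesystem properties travel in a single ``x-ms-properties`` header of comma-separated ``name=value`` pairs whose values are base64-encoded ISO-8859-1 strings. A small illustrative helper that builds such a header value — hypothetical, not part of the deleted client::

    import base64

    def encode_filesystem_properties(props):
        """Encode {'n1': 'v1', ...} as 'n1=<base64(v1)>,n2=<base64(v2)>' per the header format above."""
        pairs = []
        for name, value in props.items():
            encoded = base64.b64encode(value.encode('iso-8859-1')).decode('ascii')
            pairs.append('{}={}'.format(name, encoded))
        return ','.join(pairs)

    header_value = encode_filesystem_properties({'project': 'alpha', 'owner': 'data-team'})
    # e.g. 'project=YWxwaGE=,owner=ZGF0YS10ZWFt'

Because the header replaces all existing properties, merging (as described above) means reading the current properties and ETag first, then resending the full set conditionally.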
- :type properties: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}'} # type: ignore - - def set_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - properties=None, # type: Optional[str] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional HTTP requests. For - more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, 
response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - def get_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the response headers. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}'} # type: ignore - - def delete( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same - identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, - attempts to create a filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information indicating that the - filesystem is being deleted. All other operations, including operations on any files or - directories within the filesystem, will fail with status code 404 (Not Found) while the - filesystem is being deleted. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param modified_access_conditions: Parameter group. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}'} # type: ignore - - def list_paths( - self, - recursive, # type: bool - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - continuation=None, # type: Optional[str] - path=None, # type: Optional[str] - max_results=None, # type: Optional[int] - upn=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> "_models.PathList" - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required. 
- :type recursive: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param path: Optional. Filters results to paths within the specified directory. An error - occurs if the directory does not exist. - :type path: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PathList, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.PathList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - accept = "application/json" - - # Construct URL - url = self.list_paths.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if path is not None: - query_parameters['directory'] = self._serialize.query("path", path, 'str') - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", 
request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - deserialized = self._deserialize('PathList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_paths.metadata = {'url': '/{filesystem}'} # type: ignore - - def list_blob_hierarchy_segment( - self, - prefix=None, # type: Optional[str] - delimiter=None, # type: Optional[str] - marker=None, # type: Optional[str] - max_results=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - showonly="deleted", # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsHierarchySegmentResponse" - """The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. - :type delimiter: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. - :type marker: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param include: Include this parameter to specify one or more datasets to include in the - response. 
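# A rough sketch of the marker-based paging contract described above, not part of
# the generated client. `fs_ops` stands in for an already-constructed
# FileSystemOperations instance, and the `next_marker` / `segment.blob_items`
# attribute names on ListBlobsHierarchySegmentResponse are assumptions here.
marker = None
while True:
    page = fs_ops.list_blob_hierarchy_segment(delimiter="/", marker=marker, max_results=1000)
    for item in page.segment.blob_items:
        print(item.name)
    marker = getattr(page, "next_marker", None)
    if not marker:  # an empty NextMarker means the listing is complete
        break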
- :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem] - :param showonly: Include this parameter to specify one or more datasets to include in the - response. - :type showonly: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "container" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if delimiter is not None: - query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if showonly is not None: - query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_path_operations.py deleted file mode 100644 index c143409..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_path_operations.py +++ /dev/null @@ -1,1800 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class PathOperations(object): - """PathOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. 
- """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - resource=None, # type: Optional[Union[str, "_models.PathResourceType"]] - continuation=None, # type: Optional[str] - mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - rename_source=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - properties=None, # type: Optional[str] - permissions=None, # type: Optional[str] - umask=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is overwritten and if the - destination already exists and has a lease the lease is broken. This operation supports - conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob - Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param resource: Required only for Create File and Create Directory. The value must be "file" - or "directory". - :type resource: str or ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This parameter determines the - behavior of the rename operation. The value must be "legacy" or "posix", and the default value - will be "posix". - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. The value must have the - following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties - will overwrite the existing properties; otherwise, the existing properties will be preserved. - This value must be a URL percent-encoded string. Note that the string may only contain ASCII - characters in the ISO-8859-1 character set. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. - :type source_lease_id: str - :param properties: Optional. 
User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, the umask - restricts the permissions of the file or directory to be created. The resulting permission is - given by p bitwise and not u, where p is the permission and u is the umask. For example, if p - is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 - for a directory and 0666 for a file. The default umask is 0027. The umask must be specified - in 4-digit octal notation (e.g. 0766). - :type umask: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. 
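# Two quick checks of the header formats described in this docstring, offered as a
# sketch rather than part of the generated code. The umask rule is plain octal
# arithmetic (result = p & ~u), and x-ms-properties carries "name=value" pairs
# whose values are base64 encoded.
import base64

p, u = 0o777, 0o057
assert p & ~u & 0o777 == 0o720          # the 0777 / 0057 -> 0720 example from above
assert 0o666 & ~0o027 & 0o777 == 0o640  # default file permission with the default umask

props = {"project": "demo"}             # hypothetical user-defined properties
x_ms_properties = ",".join(
    "{}={}".format(name, base64.b64encode(value.encode("ascii")).decode("ascii"))
    for name, value in props.items()
)
# -> "project=ZGVtbw=="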
- :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_encoding = None - _content_language = None - _content_disposition = None - _content_type = None - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - _content_disposition = path_http_headers.content_disposition - _content_type = path_http_headers.content_type - if source_modified_access_conditions is not None: - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if resource is not None: - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if mode is not None: - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = 
self._serialize.header("content_language", _content_language, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if rename_source is not None: - header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if umask is not None: - header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if _source_if_match is not None: - header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') - if _source_if_none_match is not None: - header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') - if _source_if_modified_since is not None: - header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') - if _source_if_unmodified_since is not None: - header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', 
response.headers.get('x-ms-continuation')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def update( - self, - action, # type: Union[str, "_models.PathUpdateAction"] - mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - body, # type: IO - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - max_records=None, # type: Optional[int] - continuation=None, # type: Optional[str] - force_flag=None, # type: Optional[bool] - position=None, # type: Optional[int] - retain_uncommitted_data=None, # type: Optional[bool] - close=None, # type: Optional[bool] - content_length=None, # type: Optional[int] - properties=None, # type: Optional[str] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.SetAccessControlRecursiveResponse"] - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, - sets properties for a file or directory, or sets access control for a file or directory. Data - can only be appended to a file. Concurrent writes to the same file using multiple clients are - not supported. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations - `_. - - :param action: The action must be "append" to upload data to be appended to a file, "flush" to - flush previously uploaded data to a file, "setProperties" to set the properties of a file or - directory, "setAccessControl" to set the owner, group, permissions, or access control list for - a file or directory, or "setAccessControlRecursive" to set the access control list for a - directory recursively. Note that Hierarchical Namespace must be enabled for the account in - order to use access control. Also note that the Access Control List (ACL) includes permissions - for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers - are mutually exclusive. - :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param body: Initial data. - :type body: IO - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param max_records: Optional. 
Valid for "SetAccessControlRecursive" operation. It specifies the - maximum number of files or directories on which the acl change will be applied. If omitted or - greater than 2,000, the request will process up to 2,000 items. - :type max_records: int - :param continuation: Optional. The number of paths processed with each invocation is limited. - If the number of paths to be processed exceeds this limit, a continuation token is returned in - the response header x-ms-continuation. When a continuation token is returned in the response, - it must be percent-encoded and specified in a subsequent invocation of - setAccessControlRecursive operation. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param properties: Optional. 
User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. - :type properties: str - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
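# A hedged sketch of the append/flush sequence and the ACL string format described
# above; `path_ops` stands in for an already-constructed PathOperations instance,
# and passing mode="set" on append/flush is only there to satisfy this generated
# signature (mode is meaningful for the setAccessControlRecursive action).
import io

data = b"hello, data lake"
# Append the bytes at position 0; Content-Length must equal the body length here.
path_ops.update(action="append", mode="set", body=io.BytesIO(data),
                position=0, content_length=len(data))
# Flush the uploaded data; position must equal the final file length and
# Content-Length must be 0, per the rules above.
path_ops.update(action="flush", mode="set", body=io.BytesIO(b""),
                position=len(data), content_length=0)
# An access control list in the "[scope:][type]:[id]:[permissions]" format:
example_acl = "user::rwx,group::r-x,other::---"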
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/json" - - # Construct URL - url = self.update.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['action'] = self._serialize.query("action", action, 'str') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if content_length is not None: - 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if properties is not None: - header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - deserialized = None - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', 
response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if response.status_code == 202: - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def lease( - self, - x_ms_lease_action, # type: Union[str, "_models.PathLeaseAction"] - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - x_ms_lease_duration=None, # type: Optional[int] - x_ms_lease_break_period=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Lease Path. - - Create and manage a lease to restrict write and delete access to the path. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations - `_. - - :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", - and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" - to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the - lease break period is allowed to elapse, during which time no lease operation except break and - release can be performed on the file. When a lease is successfully broken, the response - indicates the interval in seconds until a new lease can be acquired. Use "change" and specify - the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to - change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. 
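# An illustrative lease cycle using the actions listed above; `path_ops` again
# stands in for an already-constructed PathOperations instance, and constructing
# LeaseAccessConditions with a lease_id keyword is an assumption about the
# generated model.
import uuid

proposed = str(uuid.uuid4())
# Acquire a 15-second lease with a proposed lease ID.
path_ops.lease(x_ms_lease_action="acquire", x_ms_lease_duration=15,
               proposed_lease_id=proposed)
# Release the lease again, passing the active lease ID through the parameter group.
path_ops.lease(x_ms_lease_action="release",
               lease_access_conditions=_models.LeaseAccessConditions(lease_id=proposed))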
- :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies - the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or - -1 for infinite lease. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, - and specifies the break period of the lease in seconds. The lease break duration must be - between 0 and 60 seconds. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str') - if x_ms_lease_duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", 
x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.post(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 201: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - if response.status_code == 202: - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) - - if cls: - return cls(pipeline_response, None, response_headers) - - lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def read( - self, - 
request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - x_ms_range_get_content_md5=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """Read File. - - Read the contents of a file. For read operations, range requests are supported. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param range: The HTTP Range request header specifies one or more byte ranges of the resource - to be retrieved. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified - together with the Range header, the service returns the MD5 hash for the range, as long as the - range is less than or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this header is set to true when - the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). - :type x_ms_range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
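# Minimal ranged-read sketch, assuming `path_ops` is an existing instance of this
# operations class. The MD5 header is only returned for ranges of at most 4 MB.
downloaded = path_ops.read(range="bytes=0-1048575", x_ms_range_get_content_md5=True)
# The return value is a streaming body; joining its chunks yields the raw bytes.
content = b"".join(downloaded)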
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.read.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['Range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if x_ms_range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - 
response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - 
response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def get_properties( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - action=None, # type: Optional[Union[str, "_models.PathGetPropertiesAction"]] - upn=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a path. Get Status returns - all system defined properties for a path. Get Access Control List returns the access control - list for a path. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param action: Optional. If the value is "getStatus" only the system defined properties for the - path are returned. If the value is "getAccessControl" the access control list is returned in - the response headers (Hierarchical Namespace must be enabled for the account), otherwise the - properties are returned. - :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
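# Hedged sketch of reading the access control list via get_properties, assuming
# `path_ops` is an existing instance of this operations class on an account with
# Hierarchical Namespace enabled. The ACL comes back in response headers, so the
# documented `cls` callback is used to capture them.
def _return_headers(pipeline_response, deserialized, headers):
    return headers

props_headers = path_ops.get_properties(action="getAccessControl", upn=True, cls=_return_headers)
acl = props_headers.get("x-ms-acl")      # e.g. "user::rwx,group::r-x,other::---"
owner = props_headers.get("x-ms-owner")  # a User Principal Name because upn=True was requested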
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if action is not None: - query_parameters['action'] = self._serialize.query("action", action, 'str') - if upn is not None: - query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - 
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def delete( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - recursive=None, # type: Optional[bool] - continuation=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param recursive: Required. - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. 
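# Sketch of a recursive directory delete that follows the continuation token described
# here; `path_ops` is assumed to be an existing instance of this operations class and
# the response headers are captured through the documented `cls` hook.
def _return_headers(pipeline_response, deserialized, headers):
    return headers

continuation = None
while True:
    headers = path_ops.delete(recursive=True, continuation=continuation, cls=_return_headers)
    continuation = headers.get("x-ms-continuation")
    if not continuation:  # no token means the directory has been fully deleted
        break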
If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - accept = "application/json" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if recursive is not None: - query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control( - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. - :type owner: str - :param group: Optional. The owning group of the blob or directory. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. 
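# Minimal sketch of set_access_control, assuming `path_ops` is an existing instance of
# this operations class; the owner object ID is a placeholder. Permissions accept either
# symbolic (rwxr-x---) or 4-digit octal (e.g. 0750) notation, per the docstring.
path_ops.set_access_control(owner="<owner-object-id>", permissions="rwxr-x---")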
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - action = "setAccessControl" - accept = "application/json" - - # Construct URL - url = self.set_access_control.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if owner is not None: - header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') - if group is not None: - header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') - if permissions is not None: - header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_access_control_recursive( - self, - mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - timeout=None, # type: Optional[int] - continuation=None, # type: Optional[str] - force_flag=None, # type: Optional[bool] - max_records=None, # type: Optional[int] - acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.SetAccessControlRecursiveResponse" - """Set the access control list for a path and sub-paths. - - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files or directories on which - the acl change will be applied. If omitted or greater than 2,000, the request will process up - to 2,000 items. - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". 
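# Hedged sketch of a recursive ACL change, assuming `path_ops` is an existing instance of
# this operations class; the object ID is a placeholder. Each ACE follows the
# "[scope:][type]:[id]:[permissions]" format described above.
result = path_ops.set_access_control_recursive(
    mode="modify",
    acl="user:<object-id>:r-x,default:user:<object-id>:r-x",
    force_flag=True,   # keep going past 4XX errors on individual sub-entities
    max_records=2000,  # per-invocation batch size; the service caps this at 2,000
)
# `result` is the deserialized SetAccessControlRecursiveResponse; if the change was split
# into batches, the continuation token is returned in the x-ms-continuation header.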
- :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - action = "setAccessControlRecursive" - accept = "application/json" - - # Construct URL - url = self.set_access_control_recursive.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - query_parameters['mode'] = self._serialize.query("mode", mode, 'str') - if force_flag is not None: - query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') - if max_records is not None: - query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if acl is not None: - header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, 
response_headers) - - return deserialized - set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def flush_data( - self, - timeout=None, # type: Optional[int] - position=None, # type: Optional[int] - retain_uncommitted_data=None, # type: Optional[bool] - close=None, # type: Optional[bool] - content_length=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. 
- :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - _cache_control = path_http_headers.cache_control - _content_type = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - action = "flush" - accept = "application/json" - - # Construct URL - url = self.flush_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if retain_uncommitted_data is not None: - query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - query_parameters['close'] = self._serialize.query("close", close, 'bool') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if _cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') - if _content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') - if _content_disposition is 
not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') - if _content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') - if _content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') - if _if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') - if _if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.patch(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def append_data( - self, - body, # type: IO - position=None, # type: Optional[int] - timeout=None, # type: Optional[int] - content_length=None, # type: Optional[int] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Append data to the file. - - :param body: Initial data. - :type body: IO - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. 
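# Sketch of the append-then-flush sequence governed by the position parameter described
# here, assuming `path_ops` is an existing instance of this operations class and the
# file has already been created with length 0.
import io

data = b"hello, data lake"
# Append at offset 0; Content-Length must equal the body length for "Append Data".
path_ops.append_data(io.BytesIO(data), position=0, content_length=len(data))
# Flush at the final length; Content-Length must be 0 for "Flush Data".
path_ops.flush_data(position=len(data), content_length=0, close=True)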
The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param path_http_headers: Parameter group. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _transactional_content_hash = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _transactional_content_hash = path_http_headers.transactional_content_hash - action = "append" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/json" - - # Construct URL - url = self.append_data.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['action'] = self._serialize.query("action", action, 'str') - if position is not None: - query_parameters['position'] = self._serialize.query("position", position, 'long') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if content_length is not None: - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) - if _transactional_content_hash is not None: - header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray') - if transactional_content_crc64 is not None: - header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", 
_lease_id, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = body - request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def set_expiry( - self, - expiry_options, # type: Union[str, "_models.PathExpiryOptions"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - expires_on=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. 
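# Minimal sketch of set_expiry, assuming `path_ops` is an existing instance of this
# operations class. The option string comes from PathExpiryOptions; "RelativeToNow" with
# a millisecond offset is an assumption here, not confirmed by this excerpt.
path_ops.set_expiry("RelativeToNow", expires_on=str(24 * 60 * 60 * 1000))  # expire in 24 hours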
- :type expires_on: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "expiry" - accept = "application/json" - - # Construct URL - url = self.set_expiry.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore - - def undelete( - self, - timeout=None, # type: Optional[int] - undelete_source=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Undelete a path that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. - :type timeout: int - :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of - the soft deleted blob to undelete. 
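# Minimal sketch of undelete for a soft-deleted path on a Hierarchical Namespace enabled
# account; `path_ops` and the source path placeholder are assumptions.
path_ops.undelete(undelete_source="<path-of-soft-deleted-blob>")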
- :type undelete_source: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "undelete" - accept = "application/json" - - # Construct URL - url = self.undelete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if undelete_source is not None: - header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_service_operations.py deleted file mode 100644 index fd5af5c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_generated/operations/_service_operations.py +++ /dev/null @@ -1,154 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.filedatalake.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def list_file_systems( - self, - prefix=None, # type: Optional[str] - continuation=None, # type: Optional[str] - max_results=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Iterable["_models.FileSystemList"] - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified prefix. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. - :type continuation: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. - :type max_results: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either FileSystemList or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.FileSystemList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - resource = "account" - accept = "application/json" - - def prepare_request(next_link=None): - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - if not next_link: - # Construct URL - url = self.list_file_systems.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['resource'] = self._serialize.query("resource", resource, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if continuation is not None: - query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') - if max_results is not None: - query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - request = self._client.get(url, query_parameters, header_parameters) - else: - url = next_link - query_parameters = {} # type: Dict[str, Any] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - request = self._client.get(url, query_parameters, header_parameters) - return request - - def extract_data(pipeline_response): - deserialized = self._deserialize('FileSystemList', pipeline_response) - list_of_elem = deserialized.filesystems - if cls: - list_of_elem = cls(list_of_elem) - return None, iter(list_of_elem) - - def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged( - get_next, extract_data - ) - list_file_systems.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_list_paths_helper.py deleted file mode 100644 index e5cb8f6..0000000 --- 
a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_list_paths_helper.py +++ /dev/null @@ -1,173 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code, \ - return_headers_and_deserialized_path_list -from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized -from ._models import PathProperties -from ._generated.models import Path - - -class DeletedPathPropertiesPaged(PageIterator): - """An Iterable of deleted path properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A path name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties) - :ivar str container: The container that the paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. 
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(DeletedPathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - max_results=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobItemInternal): - file_props = get_deleted_path_properties_from_generated_code(item) - file_props.file_system = self.container - return file_props - if isinstance(item, GenBlobPrefix): - return DirectoryPrefix( - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class DirectoryPrefix(DictMixin): - """Directory prefix. - - :ivar str name: Name of the deleted directory. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar str file_system: The file system that the deleted paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.file_system = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class PathPropertiesPaged(PageIterator): - """An Iterable of Path properties. - - :ivar str path: Filters the results to return only paths under the specified path. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results. - - :param callable command: Function to retrieve the next page of items. - :param str path: Filters the results to return only paths under the specified path. - :param int max_results: The maximum number of psths to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__( - self, command, - recursive, - path=None, - max_results=None, - continuation_token=None, - upn=None): - super(PathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.recursive = recursive - self.results_per_page = max_results - self.path = path - self.upn = upn - self.current_page = None - self.path_list = None - - def _get_next_cb(self, continuation_token): - try: - return self._command( - self.recursive, - continuation=continuation_token or None, - path=self.path, - max_results=self.results_per_page, - upn=self.upn, - cls=return_headers_and_deserialized_path_list) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.path_list, self._response = get_next_return - self.current_page = [self._build_item(item) for item in self.path_list] - - return self._response['continuation'] or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, PathProperties): - return item - if isinstance(item, Path): - path = PathProperties._from_generated(item) # pylint: disable=protected-access - return path - return item diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_models.py deleted file mode 100644 index 879da8b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_models.py +++ /dev/null @@ -1,1058 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from datetime import datetime -from enum import Enum - -from azure.multiapi.storagev2.blob.v2020_10_02 import LeaseProperties as BlobLeaseProperties -from azure.multiapi.storagev2.blob.v2020_10_02 import AccountSasPermissions as BlobAccountSasPermissions -from azure.multiapi.storagev2.blob.v2020_10_02 import ResourceTypes as BlobResourceTypes -from azure.multiapi.storagev2.blob.v2020_10_02 import UserDelegationKey as BlobUserDelegationKey -from azure.multiapi.storagev2.blob.v2020_10_02 import ContentSettings as BlobContentSettings -from azure.multiapi.storagev2.blob.v2020_10_02 import AccessPolicy as BlobAccessPolicy -from azure.multiapi.storagev2.blob.v2020_10_02 import DelimitedTextDialect as BlobDelimitedTextDialect -from azure.multiapi.storagev2.blob.v2020_10_02 import DelimitedJsonDialect as BlobDelimitedJSON -from azure.multiapi.storagev2.blob.v2020_10_02 import ArrowDialect as BlobArrowDialect -from azure.multiapi.storagev2.blob.v2020_10_02._models import ContainerPropertiesPaged -from azure.multiapi.storagev2.blob.v2020_10_02._generated.models import Logging as GenLogging, Metrics as GenMetrics, \ - RetentionPolicy as GenRetentionPolicy, StaticWebsite as GenStaticWebsite, CorsRule as GenCorsRule -from ._shared.models import DictMixin - - -class FileSystemProperties(DictMixin): - """File System properties class. - - :ivar str name: - Name of the filesystem. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file system was modified. 
- :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file system. - :ivar str public_access: Specifies whether data in the file system may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the file system has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the file system has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - file system as metadata. - :ivar bool deleted: - Whether this file system was deleted. - :ivar str deleted_version: - The version of a deleted file system. - - Returned ``FileSystemProperties`` instances expose these values through a - dictionary interface, for example: ``file_system_props["last_modified"]``. - Additionally, the file system name is available as ``file_system_props["name"]``. - """ - - def __init__(self): - self.name = None - self.last_modified = None - self.etag = None - self.lease = None - self.public_access = None - self.has_immutability_policy = None - self.has_legal_hold = None - self.metadata = None - self.deleted = None - self.deleted_version = None - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.deleted = generated.deleted - props.deleted_version = generated.version - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - generated.properties.public_access) - props.has_immutability_policy = generated.properties.has_immutability_policy - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - return props - - @classmethod - def _convert_from_container_props(cls, container_properties): - container_properties.__class__ = cls - container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - container_properties.public_access) - container_properties.lease.__class__ = LeaseProperties - return container_properties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access - - -class DirectoryProperties(DictMixin): - """ - :ivar str name: name of the directory - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool deleted: if the current directory marked as deleted - :ivar dict metadata: Name-value pairs associated with the directory as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the directory was created, in UTC. - :ivar int remaining_retention_days: The number of days that the directory will be retained - before being permanently deleted by the service. - :var ~azure.storage.filedatalake.ContentSettings content_settings: - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.etag = kwargs.get('ETag') - self.deleted = False - self.metadata = kwargs.get('metadata') - self.lease = LeaseProperties(**kwargs) - self.last_modified = kwargs.get('Last-Modified') - self.creation_time = kwargs.get('x-ms-creation-time') - self.deleted_time = None - self.remaining_retention_days = None - - -class FileProperties(DictMixin): - """ - :ivar str name: name of the file - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool deleted: if the current file marked as deleted - :ivar dict metadata: Name-value pairs associated with the file as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the file was created, in UTC. - :ivar int size: size of the file - :ivar int remaining_retention_days: The number of days that the file will be retained - before being permanently deleted by the service. - :var ~azure.storage.filedatalake.ContentSettings content_settings: - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.etag = kwargs.get('ETag') - self.deleted = False - self.metadata = kwargs.get('metadata') - self.lease = LeaseProperties(**kwargs) - self.last_modified = kwargs.get('Last-Modified') - self.creation_time = kwargs.get('x-ms-creation-time') - self.size = kwargs.get('Content-Length') - self.deleted_time = None - self.expiry_time = kwargs.get("x-ms-expiry-time") - self.remaining_retention_days = None - self.content_settings = ContentSettings(**kwargs) - - -class PathProperties(DictMixin): - """Path properties listed by get_paths api. - - :ivar str name: the full path for a file or directory. - :ivar str owner: The owner of the file or directory. - :ivar str group: he owning group of the file or directory. - :ivar str permissions: Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. 
- :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified. - :ivar bool is_directory: is the path a directory or not. - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar content_length: the size of file if the path is a file. - """ - - def __init__(self, **kwargs): - self.name = kwargs.pop('name', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - self.last_modified = kwargs.get('last_modified', None) - self.is_directory = kwargs.get('is_directory', False) - self.etag = kwargs.get('etag', None) - self.content_length = kwargs.get('content_length', None) - - @classmethod - def _from_generated(cls, generated): - path_prop = PathProperties() - path_prop.name = generated.name - path_prop.owner = generated.owner - path_prop.group = generated.group - path_prop.permissions = generated.permissions - path_prop.last_modified = datetime.strptime(generated.last_modified, "%a, %d %b %Y %H:%M:%S %Z") - path_prop.is_directory = bool(generated.is_directory) - path_prop.etag = generated.additional_properties.get('etag') - path_prop.content_length = generated.content_length - return path_prop - - -class LeaseProperties(BlobLeaseProperties): - """DataLake Lease Properties. - - :ivar str status: - The lease status of the file. Possible values: locked|unlocked - :ivar str state: - Lease state of the file. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file is leased, specifies whether the lease is of infinite or fixed duration. - """ - - -class ContentSettings(BlobContentSettings): - """The content settings of a file or directory. - - :ivar str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :ivar str content_language: - If the content_language has previously been set - for the file, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :ivar str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :ivar bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - :keyword str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :keyword str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :keyword str content_language: - If the content_language has previously been set - for the file, that value is stored. - :keyword str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :keyword str cache_control: - If the cache_control has previously been set for - the file, that value is stored. 
- :keyword bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, **kwargs): - super(ContentSettings, self).__init__( - **kwargs - ) - - -class AccountSasPermissions(BlobAccountSasPermissions): - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - create=False): - super(AccountSasPermissions, self).__init__( - read=read, create=create, write=write, list=list, - delete=delete - ) - - -class FileSystemSasPermissions(object): - """FileSystemSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_system_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool write: - Create or write content, properties, metadata. Lease the file system. - :param bool delete: - Delete the file system. - :param bool list: - List paths in the file system. - :keyword bool add: - Append data to a file in the directory. - :keyword bool create: - Write a new file, snapshot a file, or copy a file to a new file. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - **kwargs): - self.read = read - self.add = kwargs.pop('add', None) - self.create = kwargs.pop('create', None) - self.write = write - self.delete = delete - self.list = list - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSystemSasPermissions from a string. - - To specify read, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A FileSystemSasPermissions object - :rtype: ~azure.storage.filedatalake.FileSystemSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, write=p_write, delete=p_delete, - list=p_list, add=p_add, create=p_create, move=p_move, - execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class DirectorySasPermissions(object): - """DirectorySasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_directory_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool create: - Create a new directory - :param bool write: - Create or write content, properties, metadata. Lease the directory. - :param bool delete: - Delete the directory. - :keyword bool add: - Append data to a file in the directory. - :keyword bool list: - List any files in the directory. Implies Execute. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, create=False, write=False, - delete=False, **kwargs): - self.read = read - self.add = kwargs.pop('add', None) - self.create = create - self.write = write - self.delete = delete - self.list = kwargs.pop('list', None) - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a DirectorySasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions.
- :return: A DirectorySasPermissions object - :rtype: ~azure.storage.filedatalake.DirectorySasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, add=p_add, - list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_sas` function. - - :param bool read: - Read the content, properties, metadata etc. Use the file as - the source of a read operation. - :param bool create: - Write a new file. - :param bool write: - Create or write content, properties, metadata. Lease the file. - :param bool delete: - Delete the file. - :keyword bool add: - Append data to the file. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, create=False, write=False, delete=False, **kwargs): - self.read = read - self.add = kwargs.pop('add', None) - self.create = create - self.write = write - self.delete = delete - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A FileSasPermissions object - :rtype: ~azure.storage.filedatalake.FileSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, add=p_add, - move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class AccessPolicy(BlobAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - """ - - def __init__(self, permission=None, expiry=None, **kwargs): - super(AccessPolicy, self).__init__( - permission=permission, expiry=expiry, start=kwargs.pop('start', None) - ) - - -class ResourceTypes(BlobResourceTypes): - """ - Specifies the resource types that are accessible with the account SAS.
- - :param bool service: - Access to service-level APIs (e.g. List File Systems) - :param bool file_system: - Access to file_system-level APIs (e.g., Create/Delete file system, - List Directories/Files) - :param bool object: - Access to object-level APIs for - files (e.g. Create File, etc.) - """ - - def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin - ): - super(ResourceTypes, self).__init__(service=service, container=file_system, object=object) - - -class UserDelegationKey(BlobUserDelegationKey): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identity SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - - @classmethod - def _from_generated(cls, generated): - delegation_key = cls() - delegation_key.signed_oid = generated.signed_oid - delegation_key.signed_tid = generated.signed_tid - delegation_key.signed_start = generated.signed_start - delegation_key.signed_expiry = generated.signed_expiry - delegation_key.signed_service = generated.signed_service - delegation_key.signed_version = generated.signed_version - delegation_key.value = generated.value - return delegation_key - - -class PublicAccess(str, Enum): - """ - Specifies whether data in the file system may be accessed publicly and the level of access. - """ - - File = 'blob' - """ - Specifies public read access for files. File data within this file system can be read - via anonymous request, but file system data is not available. Clients cannot enumerate - files within the container via anonymous request. - """ - - FileSystem = 'container' - """ - Specifies full public read access for file system and file data. Clients can enumerate - files within the file system via anonymous request, but cannot enumerate file systems - within the storage account. - """ - - @classmethod - def _from_generated(cls, public_access): - if public_access == "blob": # pylint:disable=no-else-return - return cls.File - elif public_access == "container": - return cls.FileSystem - - return None - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class DelimitedJsonDialect(BlobDelimitedJSON): - """Defines the input or output JSON serialization for a datalake query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - -class DelimitedTextDialect(BlobDelimitedTextDialect): - """Defines the input or output delimited (CSV) serialization for a datalake query request. - - :keyword str delimiter: - Column separator, defaults to ','.
- :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - - -class ArrowDialect(BlobArrowDialect): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param str type: Required. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. - """ - - -class QuickQueryDialect(str, Enum): - """Specifies the quick query input/output dialect.""" - - DelimitedText = 'DelimitedTextDialect' - DelimitedJson = 'DelimitedJsonDialect' - Parquet = 'ParquetDialect' - - -class ArrowType(str, Enum): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class DataLakeFileQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position - - -class AccessControlChangeCounters(DictMixin): - """ - AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively. - - :ivar int directories_successful: - Number of directories where Access Control List has been updated successfully. - :ivar int files_successful: - Number of files where Access Control List has been updated successfully. - :ivar int failure_count: - Number of paths where Access Control List update has failed. - """ - - def __init__(self, directories_successful, files_successful, failure_count): - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - - -class AccessControlChangeResult(DictMixin): - """ - AccessControlChangeResult contains result of operations that change Access Control Lists recursively. - - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters: - Contains counts of paths changed from start of the operation. - :ivar str continuation: - Optional continuation token. - Value is present when operation is split into multiple batches and can be used to resume progress. - """ - - def __init__(self, counters, continuation): - self.counters = counters - self.continuation = continuation - - -class AccessControlChangeFailure(DictMixin): - """ - Represents an entry that failed to update Access Control List. - - :ivar str name: - Name of the entry. - :ivar bool is_directory: - Indicates whether the entry is a directory. - :ivar str error_message: - Indicates the reason why the entry failed to update. 
- """ - - def __init__(self, name, is_directory, error_message): - self.name = name - self.is_directory = is_directory - self.error_message = error_message - - -class AccessControlChanges(DictMixin): - """ - AccessControlChanges contains batch and cumulative counts of operations - that change Access Control Lists recursively. - Additionally it exposes path entries that failed to update while these operations progress. - - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters: - Contains counts of paths changed within single batch. - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters: - Contains counts of paths changed from start of the operation. - :ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures: - List of path entries that failed to update Access Control List within single batch. - :ivar str continuation: - An opaque continuation token that may be used to resume the operations in case of failures. - """ - - def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation): - self.batch_counters = batch_counters - self.aggregate_counters = aggregate_counters - self.batch_failures = batch_failures - self.continuation = continuation - - -class DeletedPathProperties(DictMixin): - """ - Properties populated for a deleted path. - - :ivar str name: - The name of the file in the path. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the path was deleted. - :ivar int remaining_retention_days: - The number of days that the path will be retained before being permanently deleted by the service. - :ivar str deletion_id: - The id associated with the deleted path. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.deleted_time = None - self.remaining_retention_days = None - self.deletion_id = None - - -class AnalyticsLogging(GenLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GenMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Datalake service. 
- The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GenRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GenStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. - :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. - """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GenCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. 
- - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_path_client.py deleted file mode 100644 index bbcefc3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_path_client.py +++ /dev/null @@ -1,908 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information.
-# -------------------------------------------------------------------------- -from datetime import datetime -from typing import Any, Dict, Union - -try: - from urllib.parse import urlparse, quote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.exceptions import AzureError, HttpResponseError -from azure.multiapi.storagev2.blob.v2020_10_02 import BlobClient -from ._data_lake_lease import DataLakeLeaseClient -from ._deserialize import process_storage_error -from ._generated import AzureDataLakeStorageRESTAPI -from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \ - AccessControlChangeCounters, AccessControlChangeFailure -from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \ - get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions, \ - get_api_version -from ._shared.base_client import StorageAccountHostsMixin, parse_query -from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(StorageAccountHostsMixin): - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - # remove the preceding/trailing delimiter from the path components - file_system_name = file_system_name.strip('/') - - # the name of root directory is / - if path_name != '/': - path_name = path_name.strip('/') - - if not (file_system_name and path_name): - raise ValueError("Please specify a file system name and file path.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._blob_client = BlobClient(blob_account_url, file_system_name, path_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self.path_name = path_name - - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - api_version = get_api_version(kwargs) - - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name, - pipeline=self._pipeline) - self._client._config.version = api_version # pylint: disable=protected-access - - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI( - self._blob_client.url, - file_system=file_system_name, - 
path=path_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - def __exit__(self, *args): - self._blob_client.close() - super(PathClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._blob_client.close() - self.__exit__() - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - quote(self.path_name, safe='~'), - self._query_str) - - def _create_path_options(self, resource_type, - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'resource': resource_type, - 'properties': add_metadata_headers(metadata), - 'permissions': kwargs.pop('permissions', None), - 'umask': kwargs.pop('umask', None), - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. 
- Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _delete_path_options(**kwargs): - # type: (**Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - def _delete(self, **kwargs): - # type: (**Any) -> Dict[Union[datetime, str]] - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :param ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :param int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return self._client.path.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs): - # type: (...) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'owner': owner, - 'group': group, - 'permissions': permissions, - 'acl': acl, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). 
- """ - if not any([owner, group, permissions, acl]): - raise ValueError("At least one parameter should be set for set_access_control API") - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return self._client.path.set_access_control(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _get_access_control_options(upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'action': 'getAccessControl', - 'upn': upn if upn else False, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - :param upn: Optional. - Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. 
- """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return self._client.path.get_properties(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_recursive_options(mode, acl, **kwargs): - # type: (str, str, **Any) -> Dict[str, Any] - - options = { - 'mode': mode, - 'force_flag': kwargs.pop('continue_on_failure', None), - 'timeout': kwargs.pop('timeout', None), - 'continuation': kwargs.pop('continuation_token', None), - 'max_records': kwargs.pop('batch_size', None), - 'acl': acl, - 'cls': return_headers_and_deserialized} - options.update(kwargs) - return options - - def set_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Sets the Access Control on a path and sub-paths. - - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def update_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Modifies the Access Control on a path and sub-paths. 
- - :param acl: - Modifies POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def remove_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Removes the Access Control on a path and sub-paths. - - :param acl: - Removes POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, and a user or - group identifier in the format "[scope:][type]:[id]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. 
- The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed then, - continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def _set_access_control_internal(self, options, progress_hook, max_batches=None): - try: - continue_on_failure = options.get('force_flag') - total_directories_successful = 0 - total_files_success = 0 - total_failure_count = 0 - batch_count = 0 - last_continuation_token = None - current_continuation_token = None - continue_operation = True - while continue_operation: - headers, resp = self._client.path.set_access_control_recursive(**options) - - # make a running tally so that we can report the final results - total_directories_successful += resp.directories_successful - total_files_success += resp.files_successful - total_failure_count += resp.failure_count - batch_count += 1 - current_continuation_token = headers['continuation'] - - if current_continuation_token is not None: - last_continuation_token = current_continuation_token - - if progress_hook is not None: - progress_hook(AccessControlChanges( - batch_counters=AccessControlChangeCounters( - directories_successful=resp.directories_successful, - files_successful=resp.files_successful, - failure_count=resp.failure_count, - ), - aggregate_counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count, - ), - batch_failures=[AccessControlChangeFailure( - name=failure.name, - is_directory=failure.type == 'DIRECTORY', - error_message=failure.error_message) for failure in resp.failed_entries], - continuation=last_continuation_token)) - - # update the continuation token, if there are more operations that cannot be completed in a single call - max_batches_satisfied = (max_batches is not None and batch_count == max_batches) - continue_operation = bool(current_continuation_token) and not max_batches_satisfied - options['continuation'] = current_continuation_token - - # currently the service stops on 
any failure, so we should send back the last continuation token - # for the user to retry the failed updates - # otherwise we should just return what the service gave us - return AccessControlChangeResult(counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count), - continuation=last_continuation_token - if total_failure_count > 0 and not continue_on_failure else current_continuation_token) - except HttpResponseError as error: - error.continuation_token = last_continuation_token - process_storage_error(error) - except AzureError as error: - error.continuation_token = last_continuation_token - raise error - - def _rename_path_options(self, rename_source, - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None): - raise ValueError("metadata, permissions, umask is not supported for this operation") - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - source_lease_id = get_lease_id(kwargs.pop('source_lease', None)) - mod_conditions = get_mod_conditions(kwargs) - source_mod_conditions = get_source_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'rename_source': rename_source, - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'source_lease_id': source_lease_id, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'mode': 'legacy', - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _rename_path(self, rename_source, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: - The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../tests/test_blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a file/directory. - """ - path_properties = self._blob_client.get_blob_properties(**kwargs) - return path_properties - - def _exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a path exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._blob_client.exists(**kwargs) - - def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - """ - return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. 
- :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
- """
- lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore
- lease.acquire(lease_duration=lease_duration, **kwargs)
- return lease
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_quick_query_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_quick_query_helper.py
deleted file mode 100644
index ff67d27..0000000
--- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_quick_query_helper.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import Union, Iterable, IO # pylint: disable=unused-import
-
-
-class DataLakeFileQueryReader(object): # pylint: disable=too-many-instance-attributes
- """A streaming object to read query results.
-
- :ivar str name:
- The name of the blob being queried.
- :ivar str container:
- The name of the container where the blob is.
- :ivar dict response_headers:
- The response_headers of the quick query request.
- :ivar bytes record_delimiter:
- The delimiter used to separate lines, or records with the data. The `records`
- method will return these lines via a generator.
- """
-
- def __init__(
- self,
- blob_query_reader
- ):
- self.name = blob_query_reader.name
- self.file_system = blob_query_reader.container
- self.response_headers = blob_query_reader.response_headers
- self.record_delimiter = blob_query_reader.record_delimiter
- self._bytes_processed = 0
- self._blob_query_reader = blob_query_reader
-
- def __len__(self):
- return len(self._blob_query_reader)
-
- def readall(self):
- # type: () -> Union[bytes, str]
- """Return all query results.
-
- This operation is blocking until all data is downloaded.
- If encoding has been configured - this will be used to decode individual
- records as they are received.
-
- :rtype: Union[bytes, str]
- """
- return self._blob_query_reader.readall()
-
- def readinto(self, stream):
- # type: (IO) -> None
- """Download the query result to a stream.
-
- :param stream:
- The stream to download to. This can be an open file-handle,
- or any writable stream.
- :returns: None
- """
- self._blob_query_reader.readinto(stream)
-
- def records(self):
- # type: () -> Iterable[Union[bytes, str]]
- """Returns a record generator for the query result.
-
- Records will be returned line by line.
- If encoding has been configured - this will be used to decode individual
- records as they are received.
-
- :rtype: Iterable[Union[bytes, str]]
- """
- return self._blob_query_reader.records()
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_serialize.py
deleted file mode 100644
index 491cf3b..0000000
--- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_serialize.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# -------------------------------------------------------------------------- -from azure.multiapi.storagev2.blob.v2020_10_02._serialize import _get_match_headers # pylint: disable=protected-access -from ._shared import encode_base64 -from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \ - SourceModifiedAccessConditions, LeaseAccessConditions - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08', - '2020-06-12', - '2020-08-04', - '2020-10-02' -] - - -def get_api_version(kwargs): - # type: (Dict[str, Any]) -> str - api_version = kwargs.get('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or _SUPPORTED_API_VERSIONS[-1] - - -def convert_dfs_url_to_blob_url(dfs_account_url): - return dfs_account_url.replace('.dfs.', '.blob.', 1) - - -def convert_datetime_to_rfc1123(date): - weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()] - month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", - "Oct", "Nov", "Dec"][date.month - 1] - return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, date.day, month, - date.year, date.hour, date.minute, date.second) - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> str - headers = list() - if metadata: - for key, value in metadata.items(): - headers.append(key + '=') - headers.append(encode_base64(value)) - headers.append(',') - - if headers: - del headers[-1] - - return ''.join(headers) - - -def get_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None) - ) - - -def get_source_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_path_http_headers(content_settings): - path_headers = PathHTTPHeaders( - cache_control=content_settings.cache_control, - content_type=content_settings.content_type, - content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - content_encoding=content_settings.content_encoding, - content_language=content_settings.content_language, - content_disposition=content_settings.content_disposition - ) - return path_headers - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_lease_id(lease): - if not lease: - return "" - try: - 
lease_id = lease.id - except AttributeError: - lease_id = lease - return lease_id diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . 
import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. - """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 
'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/base_client.py deleted file mode 100644 index f8fae9e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/base_client.py +++ /dev/null @@ -1,462 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - if service == "dfs": - primary = primary.replace(".blob.", ".dfs.") - secondary = secondary.replace(".blob.", ".dfs.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - 
config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/base_client_async.py deleted file mode 100644 index 091c350..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/base_client_async.py +++ /dev/null @@ -1,183 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
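The ``parse_connection_str`` helper removed above splits the ``;``-separated connection string on the first ``=`` of each segment, upper-cases the keys, and derives endpoints and credentials from the resulting mapping. A rough standalone sketch of that parsing step (the connection string below is made up)::

    conn_str = (
        "DefaultEndpointsProtocol=https;AccountName=myaccount;"
        "AccountKey=bm90LWEtcmVhbC1rZXk=;EndpointSuffix=core.windows.net"
    )

    # Split on the first '=' only, so the '=' padding in the base64 account key survives.
    settings = dict(segment.split("=", 1) for segment in conn_str.rstrip(";").split(";"))
    settings = {key.upper(): value for key, value in settings.items()}

    credential = {"account_name": settings["ACCOUNTNAME"], "account_key": settings["ACCOUNTKEY"]}

    # Mirrors the fallback primary endpoint the deleted helper builds for the blob service.
    primary = "https://{}.{}.{}".format(
        settings["ACCOUNTNAME"], "blob", settings.get("ENDPOINTSUFFIX", "core.windows.net"))
    print(primary)  # https://myaccount.blob.core.windows.net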
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/constants.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/constants.py deleted file mode 100644 index a50e8b5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/constants.py +++ /dev/null @@ -1,27 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys - -from .._generated import AzureDataLakeStorageRESTAPI - - -X_MS_VERSION = AzureDataLakeStorageRESTAPI(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. 
- ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/models.py deleted file mode 100644 index 3d46f71..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/models.py +++ /dev/null @@ -1,481 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
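The deleted ``encrypt_blob``/``_decrypt_message`` pair above implements classic envelope encryption: a random 256-bit content-encryption key and 16-byte IV are generated, the payload is PKCS7-padded and AES-CBC encrypted, and the content key is wrapped with the caller's key-encryption-key. A minimal sketch of the symmetric part only, using the same ``cryptography`` primitives (the key-wrapping step and the encryption-metadata dict are omitted)::

    from os import urandom

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher
    from cryptography.hazmat.primitives.ciphers.algorithms import AES
    from cryptography.hazmat.primitives.ciphers.modes import CBC
    from cryptography.hazmat.primitives.padding import PKCS7

    blob = b"hello, storage"
    content_encryption_key = urandom(32)   # AES-256 key
    initialization_vector = urandom(16)    # one AES block

    cipher = Cipher(AES(content_encryption_key), CBC(initialization_vector), default_backend())

    padder = PKCS7(128).padder()           # pad up to the 16-byte AES block size
    padded = padder.update(blob) + padder.finalize()
    encryptor = cipher.encryptor()
    encrypted = encryptor.update(padded) + encryptor.finalize()

    # Round-trip through the decryption path used by _decrypt_message.
    decryptor = cipher.decryptor()
    unpadder = PKCS7(128).unpadder()
    plaintext = unpadder.update(decryptor.update(encrypted) + decryptor.finalize()) + unpadder.finalize()
    assert plaintext == blob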
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. - :keyword bool permanent_delete: - To enable permanent delete on the blob is permitted. - Valid for Object resource type of Blob only. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.permanent_delete = kwargs.pop('permanent_delete', False) - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('y' if self.permanent_delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') + - ('i' if self.set_immutability_policy else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.filedatalake.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_permanent_delete = 'y' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - p_set_immutability_policy = 'i' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy, - permanent_delete=p_permanent_delete) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. 
- :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/policies.py deleted file mode 100644 index 1c77692..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/policies.py +++ /dev/null @@ -1,622 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): # pylint: disable=too-many-return-statements - """Is this method/status code retryable? (Based on allowlists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - # retry if invalid content md5 - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = response.http_request.headers.get('content-md5', None) or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
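# Illustrative sketch (not part of this package): the status-code classification that the
# deleted is_retry() applies before its MD5 check. 'SECONDARY' stands in for
# LocationMode.SECONDARY; the function name is hypothetical.

def should_retry_status(status_code, location_mode, secondary='SECONDARY'):
    # A 404 from the secondary endpoint usually means replication lag, so retry.
    if status_code == 404 and location_mode == secondary:
        return True
    # 408 is a timeout and is always retried.
    if status_code == 408:
        return True
    # 5xx indicates a server-side problem, except 501/505 which will not improve on retry.
    if status_code >= 500 and status_code not in (501, 505):
        return True
    return False

# Examples: should_retry_status(503, 'PRIMARY') -> True; should_retry_status(501, 'PRIMARY') -> False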
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - def __init__(self, logging_enable=False, **kwargs): - self.logging_body = kwargs.pop("logging_body", False) - super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs) - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - self.logging_body = self.logging_body or options.pop("logging_body", False) - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - if self.logging_body: - _LOGGER.debug(str(http_request.body)) - else: - # We don't want to log the binary data of a file upload. - _LOGGER.debug("Hidden body, please use logging_body to show body") - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
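# Illustrative sketch (not part of this package): how the deleted StorageLoggingPolicy keeps
# SAS tokens out of logs -- the 'sig' query value is replaced and the Authorization header is
# masked. A standalone version of the URL scrub; the function name is hypothetical.

from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

def redact_sas_signature(url):
    scheme, netloc, path, params, query, fragment = urlparse(url)
    query_pairs = dict(parse_qsl(query))
    if 'sig' in query_pairs:
        query_pairs['sig'] = '*****'
    return urlunparse((scheme, netloc, path, params, urlencode(query_pairs), fragment))

# Example:
#   redact_sas_signature('https://acct.blob.core.windows.net/c/b?sv=2020-10-02&sig=abc123')
#   -> 'https://acct.blob.core.windows.net/c/b?sv=2020-10-02&sig=%2A%2A%2A%2A%2A'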
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - resp_content_type = response.http_response.headers.get("content-type", "") - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif resp_content_type.endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif resp_content_type.startswith("image"): - _LOGGER.debug("Body contains image data.") - - if self.logging_body and resp_content_type.startswith("text"): - _LOGGER.debug(response.http_response.text()) - elif self.logging_body: - try: - _LOGGER.debug(response.http_response.body()) - except ValueError: - _LOGGER.debug("Body is streamable") - - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
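# Illustrative sketch (not part of this package): the deleted StorageResponseHook derives the
# total transfer size from the Content-Range header when it is not known up front. A
# standalone version of that parse; the function name is hypothetical.

def total_size_from_content_range(content_range):
    # 'bytes 0-499/1234' -> take the part after the space, then the value after the slash.
    if not content_range:
        return None
    return int(content_range.split(' ', 1)[1].split('/', 1)[1])

# Example: total_size_from_content_range('bytes 0-499/1234') -> 1234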
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
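# Illustrative sketch (not part of this package): for a bytes payload, the transactional MD5
# check in the deleted StorageContentValidation policy reduces to hashing the data and
# base64-encoding the digest, which is what the Content-MD5 header carries. The function
# name is hypothetical.

import base64
import hashlib

def content_md5(data):
    # MD5 is used for transport integrity only, not for security.
    digest = hashlib.md5(data).digest()  # nosec
    return base64.b64encode(digest).decode('utf-8')

# Example: content_md5(b'hello') -> 'XUFAKrxLKna5cZ2REBfFkg=='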
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the allowlist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. 
- - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
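# Illustrative sketch (not part of this package): the backoff formula used by the deleted
# ExponentialRetry. With the defaults (initial_backoff=15, increment_base=3) the raw delays
# are 15s for the first retry, 15 + 3**1 = 18s for the second, 15 + 3**2 = 24s for the third,
# and a jitter of +/- random_jitter_range seconds is applied to each. The function name is
# hypothetical.

import random

def exponential_backoff(retry_count, initial_backoff=15, increment_base=3, jitter=3):
    backoff = initial_backoff + (0 if retry_count == 0 else increment_base ** retry_count)
    low = backoff - jitter if backoff > jitter else 0
    return random.uniform(low, backoff + jitter)

# Example: exponential_backoff(2) returns a value in the range [21, 27].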
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/request_handlers.py deleted file mode 100644 index 325825c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/request_handlers.py +++ /dev/null @@ -1,273 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, OSError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
<header key>: <header value>
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/response_handlers.py deleted file mode 100644 index 5c1558c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/response_handlers.py +++ /dev/null @@ -1,196 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
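# Illustrative sketch (not part of this package): the multipart/mixed delimiters built by the
# deleted _get_batch_request_delimiter. Each sub-request is preceded by '--batch_<id>' and the
# body is closed with '--batch_<id>--', with CRLF appended by the caller. The helper name
# below mirrors the deleted one but is reproduced here only for illustration.

_REQUEST_DELIMITER_PREFIX = "batch_"

def batch_delimiter(batch_id, prepend_dashes=False, append_dashes=False):
    prefix = '--' if prepend_dashes else ''
    suffix = '--' if append_dashes else ''
    return prefix + _REQUEST_DELIMITER_PREFIX + batch_id + suffix

# Examples (batch id is illustrative):
#   batch_delimiter('6f1272ab', True, False) -> '--batch_6f1272ab'
#   batch_delimiter('6f1272ab', True, True)  -> '--batch_6f1272ab--'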
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. 
- if isinstance(storage_error, (PartialBatchErrorException, - ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - try: - error_body = error_body or storage_error.response.reason - except AttributeError: - error_body = '' - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error - if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise 
error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - 
QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/uploads.py deleted file mode 100644 index 1b619df..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/uploads.py +++ /dev/null @@ -1,602 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in 
islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def 
_upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. 
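For context, each SubStream above serves a fixed (offset, length) window of one shared seekable stream so parallel uploaders never read past their own block. A minimal, self-contained sketch of the window math used by get_substream_blocks earlier in this file follows; partition_blocks is a hypothetical helper name, not part of this package.

    # Rough illustration of the (offset, length) windows behind SubStream;
    # partition_blocks is a hypothetical name used only for this sketch.
    from math import ceil

    def partition_blocks(total_size, chunk_size):
        """Yield (offset, length) windows that cover total_size in chunk_size steps."""
        blocks = int(ceil(total_size / float(chunk_size)))
        last = chunk_size if total_size % chunk_size == 0 else total_size % chunk_size
        for i in range(blocks):
            yield i * chunk_size, last if i == blocks - 1 else chunk_size

    # A 10 MiB payload with 4 MiB chunks yields three windows, the last one short:
    assert list(partition_blocks(10 * 1024 * 1024, 4 * 1024 * 1024)) == [
        (0, 4194304), (4194304, 4194304), (8388608, 2097152)]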
- if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared_access_signature.py deleted file mode 100644 index f023a23..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_shared_access_signature.py +++ /dev/null @@ -1,393 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from azure.multiapi.storagev2.blob.v2020_10_02 import generate_account_sas as generate_blob_account_sas -from azure.multiapi.storagev2.blob.v2020_10_02 import generate_container_sas, generate_blob_sas -if TYPE_CHECKING: - from datetime import datetime - from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \ - UserDelegationKey - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the DataLake service. - - Use the returned signature as the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The access key to generate the shared access signature. 
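As a rough usage sketch of this helper (the upstream azure.storage.filedatalake package, which the retained multi-API versions mirror, exposes the same top-level function; the account name and key below are placeholders, and string forms are used for resource_types and permission, which the signature above accepts):

    # Hedged sketch; "<storage-account>" and "<account-key>" are placeholders.
    from datetime import datetime, timedelta
    from azure.storage.filedatalake import generate_account_sas

    sas = generate_account_sas(
        account_name="<storage-account>",
        account_key="<account-key>",
        resource_types="co",   # container (file system) + object
        permission="rl",       # read + list
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # `sas` is a query-string style token ("srt=co&sp=rl&se=...&sig=...") usable
    # as the credential for the DataLake service, file system, and path clients.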
- :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - return generate_blob_account_sas( - account_name=account_name, - account_key=account_key, - resource_types=resource_types, - permission=permission, - expiry=expiry, - **kwargs - ) - - -def generate_file_system_sas( - account_name, # type: str - file_system_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSystemSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file system. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. 
The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdlmeop. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - """ - return generate_container_sas( - account_name=account_name, - container_name=file_system_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) - - -def generate_directory_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a directory. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdlmeop. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. 
- :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - depth = len(directory_name.strip("/").split("/")) - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=directory_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - sdd=depth, - is_directory=True, - **kwargs) - - -def generate_file_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - file_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str file_name: - The name of the file. - :param str credential: - Credential could be either account key or user delegation key. - If an account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
- When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdmeop. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. 
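A rough usage sketch of this helper, under the same assumptions as above (upstream-equivalent import, placeholder account, file system, path, and key; either an account key string or a UserDelegationKey may be passed as credential):

    # Hedged sketch; placeholder names throughout.
    from datetime import datetime, timedelta
    from azure.storage.filedatalake import generate_file_sas

    token = generate_file_sas(
        account_name="<storage-account>",
        file_system_name="my-filesystem",
        directory_name="raw/2024",
        file_name="data.csv",
        credential="<account-key>",      # or a UserDelegationKey
        permission="r",                  # read-only
        expiry=datetime.utcnow() + timedelta(minutes=30),
    )
    # Append the token to the file URL to grant time-limited read access:
    url = ("https://<storage-account>.dfs.core.windows.net/"
           "my-filesystem/raw/2024/data.csv?" + token)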
- :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. This can only be used when to generate sas with delegation key. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - if directory_name: - path = directory_name.rstrip('/') + "/" + file_name - else: - path = file_name - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=path, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_upload_helper.py deleted file mode 100644 index 6d88c32..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_upload_helper.py +++ /dev/null @@ -1,104 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from ._deserialize import ( - process_storage_error) -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import ( - upload_data_chunks, - DataLakeFileChunkUploader, upload_substream_blocks) -from azure.core.exceptions import HttpResponseError - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - file_settings=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - use_original_upload_path = 
file_settings.use_byte_buffer or \ - validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - else: - upload_substream_blocks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - return client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - close=True, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_version.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/_version.py deleted file mode 100644 index c9d0e60..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.6.0" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/__init__.py deleted file mode 100644 index c24dde8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._download_async import StorageStreamDownloader -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._file_system_client_async import FileSystemClient -from ._data_lake_service_client_async import DataLakeServiceClient -from ._data_lake_lease_async import DataLakeLeaseClient - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeDirectoryClient', - 'DataLakeFileClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_directory_client_async.py deleted file mode 100644 index 80ac8de..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_directory_client_async.py +++ /dev/null @@ -1,553 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
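For context on the ``generate_file_sas`` helper whose docstring appears above: a minimal usage sketch, assuming this package exposes the same top-level exports as ``azure-storage-file-datalake`` under the v2020_10_02 namespace shown in this diff, and using placeholder account, file-system, and path names.

.. code-block:: python

    from datetime import datetime, timedelta, timezone

    # Import path assumes the v2020_10_02 namespace from this diff.
    from azure.multiapi.storagev2.filedatalake.v2020_10_02 import (
        FileSasPermissions,
        generate_file_sas,
    )

    # Placeholder values; a UserDelegationKey may be passed as the credential instead.
    sas_token = generate_file_sas(
        account_name="myaccount",
        file_system_name="my-filesystem",
        directory_name="folder/subfolder",
        file_name="data.csv",
        credential="<account-key-or-user-delegation-key>",
        permission=FileSasPermissions(read=True),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    )
    file_url = (
        "https://myaccount.dfs.core.windows.net/"
        "my-filesystem/folder/subfolder/data.csv?" + sas_token
    )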
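The ``upload_datalake_file`` helper above is the machinery behind ``DataLakeFileClient.upload_data``: when ``overwrite`` is false it adds an ``if_none_match='*'`` condition so an existing file is not silently replaced, and when ``overwrite`` is true it recreates the file and pins the returned ETag for the final flush. A rough sketch of the resulting behaviour through the public async client, with placeholder connection details:

.. code-block:: python

    import asyncio

    from azure.multiapi.storagev2.filedatalake.v2020_10_02.aio import DataLakeServiceClient

    async def main():
        # Placeholder connection string and paths.
        service = DataLakeServiceClient.from_connection_string("<connection-string>")
        async with service:
            fs = service.get_file_system_client("my-filesystem")
            file_client = fs.get_file_client("logs/run.txt")

            # overwrite=True: the file is (re)created, chunks are uploaded, and the flush
            # carries the creation ETag so no other writer can flush in between.
            await file_client.upload_data(b"first version", overwrite=True)

            # overwrite=False (the default): the request carries if_none_match='*', so
            # uploading over an existing file fails instead of replacing it.
            try:
                await file_client.upload_data(b"second version", overwrite=False)
            except Exception as exc:  # surfaces as an azure.core HttpResponseError subclass
                print("upload refused:", exc)

    asyncio.run(main())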
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Any - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore -from azure.core.pipeline import AsyncPipeline -from ._data_lake_file_client_async import DataLakeFileClient -from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase -from .._models import DirectoryProperties, FileProperties -from .._deserialize import deserialize_dir_properties -from ._path_client_async import PathClient -from .._shared.base_client_async import AsyncTransportWrapper - - -class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call - credential=credential, **kwargs) - - async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return await self._create('directory', metadata=metadata, **kwargs) - - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._exists(**kwargs) - - async def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return await self._delete(recursive=True, **kwargs) - - async def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access - - async def rename_directory(self, new_name, # type: str - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. 
- The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. 
- """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new directory") - if not self._raw_credential and new_file_system == self.file_system_name: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = DataLakeDirectoryClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, - credential=self._raw_credential or new_dir_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_directory_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_directory_client - - async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.delete_directory(**kwargs) - return subdir - - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. 
- :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_file_client] - :end-before: [END bsc_get_file_client] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file.get('name') - except AttributeError: - file_path = self.path_name + '/' + str(file) - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The sub subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_directory_client] - :end-before: [END bsc_get_directory_client] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - subdir_path = sub_directory.get('name') - except AttributeError: - subdir_path = self.path_name + '/' + str(sub_directory) - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_file_client_async.py deleted file mode 100644 index 34c33ee..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_file_client_async.py +++ /dev/null @@ -1,574 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
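As a companion to the async ``DataLakeDirectoryClient`` above, a brief sketch of the operations its docstrings describe (create, sub-directory, file client, rename). The connection string and path names are placeholders; note that ``rename_directory`` expects the full ``"{filesystem}/{directory}"`` destination path.

.. code-block:: python

    import asyncio

    from azure.multiapi.storagev2.filedatalake.v2020_10_02.aio import DataLakeServiceClient

    async def main():
        # Placeholder connection string, file system, and directory names.
        service = DataLakeServiceClient.from_connection_string("<connection-string>")
        async with service:
            directory = service.get_directory_client("my-filesystem", "raw/2020")

            await directory.create_directory(metadata={"owner": "etl"})
            staging = await directory.create_sub_directory("staging")  # returns a DataLakeDirectoryClient
            manifest = directory.get_file_client("manifest.json")      # client only; the file need not exist

            # Destination is "{filesystem}/{directory}", per the docstring above.
            archived = await directory.rename_directory("my-filesystem/raw/2020-archived")
            await archived.delete_directory()

    asyncio.run(main())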
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Any -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -from azure.core.exceptions import HttpResponseError -from ._download_async import StorageStreamDownloader -from ._path_client_async import PathClient -from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase -from .._serialize import convert_datetime_to_rfc1123 -from .._deserialize import process_storage_error, deserialize_file_properties -from .._models import FileProperties -from ..aio._upload_helper import upload_datalake_file - - -class DataLakeFileClient(PathClient, DataLakeFileClientBase): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - async def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. 
- When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._exists(**kwargs) - - async def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return await self._delete(**kwargs) - - async def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - return await self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access - - async def set_file_expiry(self, expiry_options, # type: str - expires_on=None, # type: Optional[Union[datetime, int]] - **kwargs): - # type: (...) -> None - """Sets the time a file will expire and be deleted. - - :param str expiry_options: - Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' - :param datetime or int expires_on: - The time to set the file to expiry. 
- When expiry_options is RelativeTo*, expires_on should be an int in milliseconds - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - expires_on = convert_datetime_to_rfc1123(expires_on) - except AttributeError: - expires_on = str(expires_on) - await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on, - **kwargs) # pylint: disable=protected-access - - async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. 
Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return await upload_datalake_file(**options) - - async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data, - offset, - length=length, - **kwargs) - try: - return await self._client.path.append_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. - - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. 
This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 12 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return await self._client.path.flush_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - async def rename_file(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_file_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_file_sas = self._query_str.strip('?') - - new_file_client = DataLakeFileClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, - credential=self._raw_credential or new_file_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_file_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_file_client diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_lease_async.py deleted file mode 100644 index fabb054..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_lease_async.py +++ /dev/null @@ -1,243 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
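Likewise for the async ``DataLakeFileClient`` above, a short sketch of the append/flush/download flow its docstrings document. Names are placeholders; the offset passed to ``append_data`` and the position passed to ``flush_data`` must track the cumulative number of bytes written.

.. code-block:: python

    import asyncio

    from azure.multiapi.storagev2.filedatalake.v2020_10_02.aio import DataLakeServiceClient

    async def main():
        # Placeholder connection string and file path.
        service = DataLakeServiceClient.from_connection_string("<connection-string>")
        async with service:
            file_client = service.get_file_client("my-filesystem", "logs/run.txt")

            await file_client.create_file()
            first, second = b"hello ", b"datalake"
            await file_client.append_data(first, offset=0, length=len(first))
            await file_client.append_data(second, offset=len(first), length=len(second))
            # Commit everything appended so far; close=True marks this as the final flush.
            await file_client.flush_data(len(first) + len(second), close=True)

            downloader = await file_client.download_file()
            print(await downloader.readall())  # b"hello datalake"

    asyncio.run(main())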
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2020_10_02.aio import BlobLeaseClient -from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase - - -if TYPE_CHECKING: - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(DataLakeLeaseClientBase): - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.aio.FileSystemClient or - ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - super(DataLakeLeaseClient, self).__init__(client, lease_id) - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
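# A minimal sketch of the lease workflow that DataLakeLeaseClient wraps, using the
# upstream azure.storage.filedatalake.aio API; the endpoint, credential and names
# are assumed placeholders.
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake.aio import FileSystemClient

async def lease_example():
    async with FileSystemClient(
            "https://myaccount.dfs.core.windows.net", "myfilesystem",
            credential=DefaultAzureCredential()) as file_system:
        # Acquire a 15-second lease (allowed values: 15-60 seconds, or -1 for infinite).
        lease = await file_system.acquire_lease(lease_duration=15)
        try:
            # Mutating operations must now present the lease.
            await file_system.set_file_system_metadata({"locked": "true"}, lease=lease)
        finally:
            # release() ends the lease immediately; break_lease() would let it lapse.
            await lease.release()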
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. 
If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_service_client_async.py deleted file mode 100644 index b6692d1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_data_lake_service_client_async.py +++ /dev/null @@ -1,510 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Optional, Any, Dict - -from azure.core.paging import ItemPaged -from azure.core.pipeline import AsyncPipeline - -from azure.multiapi.storagev2.blob.v2020_10_02.aio import BlobServiceClient -from .._serialize import get_api_version -from .._generated.aio import AzureDataLakeStorageRESTAPI -from .._deserialize import get_datalake_service_properties -from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin -from ._file_system_client_async import FileSystemClient -from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase -from .._shared.policies_async import ExponentialRetry -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_file_client_async import DataLakeFileClient -from ._models import FileSystemPropertiesPaged -from .._models import UserDelegationKey, LocationMode - - -class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. 
- :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(DataLakeServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs - ) - self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) #pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - async def __aenter__(self): - await self._blob_service_client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._blob_service_client.close() - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_service_client.close() - - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. 
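# A minimal sketch of constructing the async DataLakeServiceClient and requesting a
# user delegation key, as documented above. Uses the upstream
# azure.storage.filedatalake.aio API; the account URL and validity window are assumptions.
from datetime import datetime, timedelta
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake.aio import DataLakeServiceClient

async def user_delegation_key_example():
    credential = DefaultAzureCredential()
    async with DataLakeServiceClient(
            "https://myaccount.dfs.core.windows.net", credential=credential) as service:
        # A token credential (not a shared key or SAS) is required for this call.
        key = await service.get_user_delegation_key(
            key_start_time=datetime.utcnow(),
            key_expiry_time=datetime.utcnow() + timedelta(hours=1))
        print(key.signed_expiry)
    await credential.close()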
- """ - delegation_key = await self._blob_service_client.get_user_delegation_key( - key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool include_deleted: - Specifies that deleted file systems to be returned in the response. This is for file system restore enabled - account. The default value is `False`. - .. versionadded:: 12.3.0 - :keyword bool include_system: - Flag specifying that system filesystems should be included. - .. versionadded:: 12.6.0 - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. - """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - async def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - async def _rename_file_system(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str name: - The name of the filesystem to rename. - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - await self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = self.get_file_system_client(new_name) - return renamed_file_system - - async def undelete_file_system(self, name, deleted_version, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Restores soft-deleted filesystem. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.3.0 - This operation was introduced in API version '2019-12-12'. - - :param str name: - Specifies the name of the deleted filesystem to restore. - :param str deleted_version: - Specifies the version of the deleted filesystem to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - new_name = kwargs.pop('new_name', None) - await self._blob_service_client.undelete_container(name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access - file_system = self.get_file_system_client(new_name or name) - return file_system - - async def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
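# A minimal sketch of file system management through the service client
# (create_file_system, list_file_systems, delete_file_system above). The endpoint,
# credential and file system names are assumed placeholders.
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake.aio import DataLakeServiceClient

async def manage_file_systems_example():
    async with DataLakeServiceClient(
            "https://myaccount.dfs.core.windows.net",
            credential=DefaultAzureCredential()) as service:
        await service.create_file_system("logs-archive", metadata={"owner": "data-team"})
        # list_file_systems returns an async paged iterator of FileSystemProperties.
        async for fs in service.list_file_systems(name_starts_with="logs-"):
            print(fs.name, fs.last_modified)
        await service.delete_file_system("logs-archive")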
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, - _pipeline=self._pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. 
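# A minimal sketch of the get_*_client accessors described here: the sub-clients are
# built locally without a service call, so the target entities need not exist yet.
# All names are assumed placeholders.
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake.aio import DataLakeServiceClient

async def sub_client_example():
    async with DataLakeServiceClient(
            "https://myaccount.dfs.core.windows.net",
            credential=DefaultAzureCredential()) as service:
        file_system = service.get_file_system_client("myfilesystem")
        directory = service.get_directory_client("myfilesystem", "raw/2024")
        file_client = service.get_file_client("myfilesystem", "raw/2024/events.json")
        # Only this line actually talks to the service.
        print(await file_system.exists())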
- :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - async def set_service_properties(self, **kwargs): - # type: (**Any) -> None - """Sets the properties of a storage account's Datalake service, including - Azure Storage Analytics. - - If an element (e.g. 
analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging - :keyword hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates. - :type hour_metrics: ~azure.storage.filedatalake.Metrics - :keyword minute_metrics: - The minute metrics settings provide request statistics - for each minute. - :type minute_metrics: ~azure.storage.filedatalake.Metrics - :keyword cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.filedatalake.CorsRule] - :keyword str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :keyword delete_retention_policy: - The delete retention policy specifies whether to retain deleted files/directories. - It also specifies the number of days and versions of file/directory to keep. - :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy - :keyword static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.filedatalake.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - return await self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access - - async def get_service_properties(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Gets the properties of a storage account's datalake service, including - Azure Storage Analytics. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing datalake service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - """ - props = await self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access - return get_datalake_service_properties(props) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_download_async.py deleted file mode 100644 index 5685478..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_download_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import AsyncIterator - -from .._deserialize import from_blob_properties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. 
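# A minimal sketch for set_service_properties / get_service_properties above; it
# assumes RetentionPolicy is exported by the upstream azure.storage.filedatalake
# package and uses a placeholder 7-day retention window.
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake import RetentionPolicy
from azure.storage.filedatalake.aio import DataLakeServiceClient

async def service_properties_example():
    async with DataLakeServiceClient(
            "https://myaccount.dfs.core.windows.net",
            credential=DefaultAzureCredential()) as service:
        # Settings left unspecified keep their existing values on the service.
        await service.set_service_properties(
            delete_retention_policy=RetentionPolicy(enabled=True, days=7))
        props = await service.get_service_properties()
        print(props["delete_retention_policy"])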
- :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the file. - """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - """ - return self._downloader.chunks() - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return await self._downloader.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_file_system_client_async.py deleted file mode 100644 index 01bf5aa..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_file_system_client_async.py +++ /dev/null @@ -1,942 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information.
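# A minimal sketch of consuming the StorageStreamDownloader above: readall() for
# small payloads, chunks() to stream larger ones. Endpoint, file system and path
# are assumed placeholders; the upstream azure.storage.filedatalake.aio API is used.
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake.aio import DataLakeFileClient

async def download_example():
    async with DataLakeFileClient(
            "https://myaccount.dfs.core.windows.net", "myfilesystem", "raw/events.json",
            credential=DefaultAzureCredential()) as file_client:
        downloader = await file_client.download_file()
        data = await downloader.readall()              # whole file as bytes
        print(len(data))
        downloader = await file_client.download_file(offset=0, length=4096)
        async for chunk in downloader.chunks():        # stream the first 4 KiB
            print(len(chunk))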
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncItemPaged - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.multiapi.storagev2.blob.v2020_10_02.aio import ContainerClient -from .._serialize import get_api_version -from .._deserialize import process_storage_error, is_file_path -from .._generated.models import ListBlobsIncludeItem - -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_lease_async import DataLakeLeaseClient -from .._file_system_client import FileSystemClient as FileSystemClientBase -from .._generated.aio import AzureDataLakeStorageRESTAPI -from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._models import FileSystemProperties, PublicAccess, DirectoryProperties, FileProperties, DeletedPathProperties -from ._list_paths_helper import DeletedPathPropertiesPaged, PathPropertiesPaged - - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings) - - -class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase): - """A client to interact with a specific file system, even if that file system - may not yet exist. - - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(FileSystemClient, self).__init__( - account_url, - file_system_name=file_system_name, - credential=credential, - **kwargs) - # to override the class field _container_client sync version - kwargs.pop('_hosts', None) - self._container_client = ContainerClient(self._blob_account_url, file_system_name, - credential=credential, - _hosts=self._container_client._hosts,# pylint: disable=protected-access - **kwargs) # type: ignore # pylint: disable=protected-access - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline) - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url, - file_system=file_system_name, - pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._container_client.close() - await super(FileSystemClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._container_client.close() - await self.__aexit__() - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. 
- :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the file_system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 16 - :caption: Creating a file system in the datalake service. - """ - return await self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file system exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._container_client.exists(**kwargs) - - @distributed_trace_async - async def _rename_file_system(self, new_name, **kwargs): - # type: (str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - await self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = FileSystemClient( - "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - return renamed_file_system - - @distributed_trace_async - async def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. 
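# A minimal sketch of creating a file system directly from FileSystemClient and
# probing it with exists(); the endpoint and names are assumed placeholders.
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake.aio import FileSystemClient

async def create_file_system_example():
    async with FileSystemClient(
            "https://myaccount.dfs.core.windows.net", "staging",
            credential=DefaultAzureCredential()) as file_system:
        if not await file_system.exists():
            await file_system.create_file_system(metadata={"env": "staging"})
        props = await file_system.get_file_system_properties()
        print(props.name, props.last_modified)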
- - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 16 - :caption: Deleting a file system in the datalake service. - """ - await self._container_client.delete_container(**kwargs) - - @distributed_trace_async - async def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the file system. - """ - container_properties = await self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - @distributed_trace_async - async def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. 
To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - return await self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - @distributed_trace_async - async def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
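# A minimal sketch for set_file_system_metadata and set_file_system_access_policy
# above; AccessPolicy and FileSystemSasPermissions are assumed to be exported by the
# upstream azure.storage.filedatalake package, and all names and dates are placeholders.
from datetime import datetime, timedelta
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake import AccessPolicy, FileSystemSasPermissions
from azure.storage.filedatalake.aio import FileSystemClient

async def access_policy_example():
    async with FileSystemClient(
            "https://myaccount.dfs.core.windows.net", "myfilesystem",
            credential=DefaultAzureCredential()) as file_system:
        # Each call replaces all metadata currently attached to the file system.
        await file_system.set_file_system_metadata({"category": "test"})
        read_policy = AccessPolicy(
            permission=FileSystemSasPermissions(read=True),
            start=datetime.utcnow(),
            expiry=datetime.utcnow() + timedelta(hours=1))
        # At most five stored access policies may be associated with a file system.
        await file_system.set_file_system_access_policy({"read-only": read_policy})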
- Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return await self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - @distributed_trace_async - async def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, get_file_system_access_policy only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_policy = await self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - @distributed_trace - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> AsyncItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: - An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 12 - :caption: List the blobs in the file system. 
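# A minimal sketch of enumerating paths with get_paths() above; the file system and
# prefix are assumed placeholders.
from azure.identity.aio import DefaultAzureCredential
from azure.storage.filedatalake.aio import FileSystemClient

async def list_paths_example():
    async with FileSystemClient(
            "https://myaccount.dfs.core.windows.net", "myfilesystem",
            credential=DefaultAzureCredential()) as file_system:
        # Recursively lists everything under "raw/2024"; pages are fetched lazily.
        async for path in file_system.get_paths(path="raw/2024", recursive=True):
            label = "<dir>" if path.is_directory else path.content_length
            print(path.name, label)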
- """ - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.file_system.list_paths, - path=path, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, recursive, path=path, max_results=max_results, - page_iterator_class=PathPropertiesPaged, **kwargs) - - @distributed_trace_async - async def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - @distributed_trace_async - async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.delete_directory(**kwargs) - return directory_client - - @distributed_trace_async - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 12 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - @distributed_trace_async - async def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 12 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.delete_file(**kwargs) - return file_client - - # TODO: Temporarily removing this for GA release. - # @distributed_trace_async - # async def delete_files(self, *files, **kwargs): - # # type: (...) -> AsyncIterator[AsyncHttpResponse] - # """Marks the specified files or empty directories for deletion. - - # The files/empty directories are later deleted during garbage collection. - - # If a delete retention policy is enabled for the service, then this operation soft deletes the - # files/empty directories and retains the files or snapshots for specified number of days. - # After specified number of days, files' data is removed from the service during garbage collection. - # Soft deleted files/empty directories are accessible through :func:`list_deleted_paths()`. - - # :param files: - # The files/empty directories to delete. This can be a single file/empty directory, or multiple values can - # be supplied, where each value is either the name of the file/directory (str) or - # FileProperties/DirectoryProperties. - - # .. note:: - # When the file/dir type is dict, here's a list of keys, value rules. - - # blob name: - # key: 'name', value type: str - # if the file modified or not: - # key: 'if_modified_since', 'if_unmodified_since', value type: datetime - # etag: - # key: 'etag', value type: str - # match the etag or not: - # key: 'match_condition', value type: MatchConditions - # lease: - # key: 'lease_id', value type: Union[str, LeaseClient] - # timeout for subrequest: - # key: 'timeout', value type: int - - # :type files: list[str], list[dict], - # or list[Union[~azure.storage.filedatalake.FileProperties, ~azure.storage.filedatalake.DirectoryProperties] - # :keyword ~datetime.datetime if_modified_since: - # A DateTime value. Azure expects the date value passed in to be UTC. - # If timezone is included, any non-UTC datetimes will be converted to UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - # Specify this header to perform the operation only - # if the resource has been modified since the specified time. - # :keyword ~datetime.datetime if_unmodified_since: - # A DateTime value. Azure expects the date value passed in to be UTC. - # If timezone is included, any non-UTC datetimes will be converted to UTC. 
- # If a date is passed in without timezone info, it is assumed to be UTC. - # Specify this header to perform the operation only if - # the resource has not been modified since the specified date/time. - # :keyword bool raise_on_any_failure: - # This is a boolean param which defaults to True. When this is set, an exception - # is raised even if there is a single operation failure. - # :keyword int timeout: - # The timeout parameter is expressed in seconds. - # :return: An iterator of responses, one for each blob in order - # :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - # .. admonition:: Example: - - # .. literalinclude:: ../samples/datalake_samples_file_system_async.py - # :start-after: [START batch_delete_files_or_empty_directories] - # :end-before: [END batch_delete_files_or_empty_directories] - # :language: python - # :dedent: 4 - # :caption: Deleting multiple files or empty directories. - # """ - # return await self._container_client.delete_blobs(*files, **kwargs) - - @distributed_trace_async - async def _undelete_path(self, deleted_path_name, deletion_id, **kwargs): - # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient] - """Restores soft-deleted path. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :param str deleted_path_name: - Specifies the name of the deleted container to restore. - :param str deletion_id: - Specifies the version of the deleted container to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.file.datalake.aio.DataLakeDirectoryClient - or azure.storage.file.datalake.aio.DataLakeFileClient - """ - _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id) - - pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - path_client = AzureDataLakeStorageRESTAPI( - url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline) - try: - is_file = await path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs) - if is_file: - return self.get_file_client(deleted_path_name) - return self.get_directory_client(deleted_path_name) - except HttpResponseError as error: - process_storage_error(error) - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.get('name') - except AttributeError: - directory_name = str(directory) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, - loop=self._loop - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file_path.get('name') - except AttributeError: - file_path = str(file_path) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) - - @distributed_trace - def list_deleted_paths(self, **kwargs): - # type: (Any) -> AsyncItemPaged[DeletedPathProperties] - """Returns a generator to list the deleted (file or directory) paths under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword str path_prefix: - Filters the results to return only paths under the specified path. - :keyword int results_per_page: - An optional value that specifies the maximum number of items to return per page. - If omitted or greater than 5,000, the response will include up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of DeletedPathProperties. 
- :rtype: - ~azure.core.paging.AsyncItemPaged[~azure.storage.filedatalake.DeletedPathProperties] - """ - path_prefix = kwargs.pop('path_prefix', None) - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment, - showonly=ListBlobsIncludeItem.deleted, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged, - results_per_page=results_per_page, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_list_paths_helper.py deleted file mode 100644 index 74ce2d7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_list_paths_helper.py +++ /dev/null @@ -1,177 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from azure.core.exceptions import HttpResponseError -from azure.core.async_paging import AsyncPageIterator - -from .._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code, \ - return_headers_and_deserialized_path_list -from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix - -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized -from .._generated.models import Path -from .._models import PathProperties - - -class DeletedPathPropertiesPaged(AsyncPageIterator): - """An Iterable of deleted path properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A path name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties) - :ivar str container: The container that the paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. 
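A hedged sketch of how this page iterator is typically reached, namely through the ``list_deleted_paths`` method shown above; ``fs_client`` is a placeholder for an already-constructed aio ``FileSystemClient``, and the attribute names printed are assumptions based on ``DeletedPathProperties``:

.. code-block:: python

    # Sketch: page through soft-deleted paths (requires a delete retention policy).
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from azure.multiapi.storagev2.filedatalake.v2020_10_02.aio import FileSystemClient


    async def show_deleted_paths(fs_client: "FileSystemClient") -> None:
        # Pages are fetched lazily as the async iterator is consumed.
        async for deleted in fs_client.list_deleted_paths(results_per_page=100):
            # The deletion id is what a later undelete call would need.
            print(deleted.name, deleted.deletion_id)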
- """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(DeletedPathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - max_results=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobItemInternal): - file_props = get_deleted_path_properties_from_generated_code(item) - file_props.file_system = self.container - return file_props - if isinstance(item, GenBlobPrefix): - return DirectoryPrefix( - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class DirectoryPrefix(DictMixin): - """Directory prefix. - - :ivar str name: Name of the deleted directory. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar str file_system: The file system that the deleted paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.file_system = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class PathPropertiesPaged(AsyncPageIterator): - """An Iterable of Path properties. - - :ivar str path: Filters the results to return only paths under the specified path. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results. - - :param callable command: Function to retrieve the next page of items. - :param str path: Filters the results to return only paths under the specified path. - :param int max_results: The maximum number of psths to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - - def __init__( - self, command, - recursive, - path=None, - max_results=None, - continuation_token=None, - upn=None): - super(PathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.recursive = recursive - self.results_per_page = max_results - self.path = path - self.upn = upn - self.current_page = None - self.path_list = None - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - self.recursive, - continuation=continuation_token or None, - path=self.path, - max_results=self.results_per_page, - upn=self.upn, - cls=return_headers_and_deserialized_path_list) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.path_list, self._response = get_next_return - self.current_page = [self._build_item(item) for item in self.path_list] - - return self._response['continuation'] or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, PathProperties): - return item - if isinstance(item, Path): - path = PathProperties._from_generated(item) # pylint: disable=protected-access - return path - return item diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_models.py deleted file mode 100644 index e1ba4c1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_models.py +++ /dev/null @@ -1,41 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from azure.multiapi.storagev2.blob.v2020_10_02.aio._models import ContainerPropertiesPaged -from .._models import FileSystemProperties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_path_client_async.py deleted file mode 100644 index 242aac0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_path_client_async.py +++ /dev/null @@ -1,732 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from datetime import datetime -from typing import Any, Dict, Union - -from azure.core.exceptions import AzureError, HttpResponseError -from azure.multiapi.storagev2.blob.v2020_10_02.aio import BlobClient -from .._serialize import get_api_version -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._path_client import PathClient as PathClientBase -from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \ - AccessControlChangeCounters, AccessControlChanges -from .._generated.aio import AzureDataLakeStorageRESTAPI -from ._data_lake_lease_async import DataLakeLeaseClient -from .._deserialize import process_storage_error -from .._shared.policies_async import ExponentialRetry - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(AsyncStorageAccountHostsMixin, PathClientBase): - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - - super(PathClient, self).__init__(account_url, # pylint: disable=specify-parameter-names-in-call - file_system_name, path_name, - credential=credential, - **kwargs) # type: ignore - - kwargs.pop('_hosts', None) - - self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=file_system_name, - blob_name=path_name, - credential=credential, - _hosts=self._blob_client._hosts, # pylint: disable=protected-access - **kwargs) - - self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._blob_client.url, - file_system=file_system_name, - path=path_name, - pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._blob_client.close() - await super(PathClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. 
- """ - await self._blob_client.close() - await self.__aexit__() - - async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def _delete(self, **kwargs): - # type: (**Any) -> Dict[Union[datetime, str]] - """ - Marks the specified path for deletion. 
- - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return await self._client.path.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). - """ - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return await self._client.path.set_access_control(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Get the owner, group, permissions, or access control list for a path. - - :param upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. - """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return await self._client.path.get_properties(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def set_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Sets the Access Control on a path and sub-paths. 
- - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def update_access_control_recursive(self, acl, **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Modifies the Access Control on a path and sub-paths. - - :param acl: - Modifies POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. 
Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single, - change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def remove_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Removes the Access Control on a path and sub-paths. - - :param acl: - Removes POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, and a user or - group identifier in the format "[scope:][type]:[id]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def _set_access_control_internal(self, options, progress_hook, max_batches=None): - try: - continue_on_failure = options.get('force_flag') - total_directories_successful = 0 - total_files_success = 0 - total_failure_count = 0 - batch_count = 0 - last_continuation_token = None - current_continuation_token = None - continue_operation = True - while continue_operation: - headers, resp = await self._client.path.set_access_control_recursive(**options) - - # make a running tally so that we can report the final results - total_directories_successful += resp.directories_successful - total_files_success += resp.files_successful - total_failure_count += resp.failure_count - batch_count += 1 - current_continuation_token = headers['continuation'] - - if current_continuation_token is not None: - last_continuation_token = current_continuation_token - - if progress_hook is not None: - await progress_hook(AccessControlChanges( - batch_counters=AccessControlChangeCounters( - directories_successful=resp.directories_successful, - files_successful=resp.files_successful, - failure_count=resp.failure_count, - ), - aggregate_counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count, - ), - batch_failures=[AccessControlChangeFailure( - name=failure.name, - is_directory=failure.type == 'DIRECTORY', - error_message=failure.error_message) for failure in resp.failed_entries], - continuation=last_continuation_token)) - - # update the continuation token, if there are more operations that cannot be completed in a single call - max_batches_satisfied = (max_batches is not None and batch_count == max_batches) - continue_operation = bool(current_continuation_token) and not max_batches_satisfied - options['continuation'] = current_continuation_token - - # currently the service stops on any failure, so we should send back the last continuation token - # for the user to retry the failed updates - # otherwise we should just return what the service gave us - return AccessControlChangeResult(counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count), - continuation=last_continuation_token - if total_failure_count > 0 and not continue_on_failure else current_continuation_token) - except HttpResponseError as error: - error.continuation_token = last_continuation_token - process_storage_error(error) - except AzureError as error: - error.continuation_token = last_continuation_token - raise error - - async def _rename_path(self, rename_source, 
**kwargs): - # type: (str, **Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return await self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. 
- - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - """ - path_properties = await self._blob_client.get_blob_properties(**kwargs) - return path_properties - - async def _exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a path exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._blob_client.exists(**kwargs) - - async def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - """ - return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - async def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overridden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_http_headers only succeeds if the - file/directory's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - async def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time.
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_file_system_samples.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file_system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_upload_helper.py deleted file mode 100644 index 00d5bf1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2020_10_02/aio/_upload_helper.py +++ /dev/null @@ -1,103 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from azure.core.exceptions import HttpResponseError -from .._deserialize import ( - process_storage_error) -from .._shared.response_handlers import return_response_headers -from .._shared.uploads_async import ( - upload_data_chunks, - DataLakeFileChunkUploader, upload_substream_blocks) - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -async def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - file_settings=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = await client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - use_original_upload_path = file_settings.use_byte_buffer or \ - validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - await upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - else: - await upload_substream_blocks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - return await client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - close=True, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_10_02/py.typed b/azure/multiapi/storagev2/filedatalake/v2020_10_02/py.typed deleted file mode 100644 index e69de29..0000000 diff --git 
a/azure/multiapi/storagev2/filedatalake/v2021_06_08/__init__.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/__init__.py deleted file mode 100644 index bb3c23e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/__init__.py +++ /dev/null @@ -1,107 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._download import StorageStreamDownloader -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._file_system_client import FileSystemClient -from ._data_lake_service_client import DataLakeServiceClient -from ._data_lake_lease import DataLakeLeaseClient -from ._models import ( - LocationMode, - ResourceTypes, - FileSystemProperties, - FileSystemPropertiesPaged, - DirectoryProperties, - FileProperties, - PathProperties, - LeaseProperties, - ContentSettings, - AccountSasPermissions, - FileSystemSasPermissions, - DirectorySasPermissions, - FileSasPermissions, - UserDelegationKey, - PublicAccess, - AccessPolicy, - DelimitedTextDialect, - DelimitedJsonDialect, - ArrowDialect, - ArrowType, - QuickQueryDialect, - DataLakeFileQueryError, - AccessControlChangeResult, - AccessControlChangeCounters, - AccessControlChangeFailure, - AccessControlChanges, - AnalyticsLogging, - Metrics, - RetentionPolicy, - StaticWebsite, - CorsRule, - DeletedPathProperties, - CustomerProvidedEncryptionKey -) - -from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \ - generate_file_sas - -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import StorageErrorCode -from ._version import VERSION - -__version__ = VERSION - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeFileClient', - 'DataLakeDirectoryClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'PublicAccess', - 'AccessPolicy', - 'ResourceTypes', - 'StorageErrorCode', - 'UserDelegationKey', - 'FileSystemProperties', - 'FileSystemPropertiesPaged', - 'DirectoryProperties', - 'FileProperties', - 'PathProperties', - 'LeaseProperties', - 'ContentSettings', - 'AccessControlChangeResult', - 'AccessControlChangeCounters', - 'AccessControlChangeFailure', - 'AccessControlChanges', - 'AccountSasPermissions', - 'FileSystemSasPermissions', - 'DirectorySasPermissions', - 'FileSasPermissions', - 'generate_account_sas', - 'generate_file_system_sas', - 'generate_directory_sas', - 'generate_file_sas', - 'VERSION', - 'StorageStreamDownloader', - 'DelimitedTextDialect', - 'DelimitedJsonDialect', - 'DataLakeFileQueryError', - 'ArrowDialect', - 'ArrowType', - 'QuickQueryDialect', - 'DataLakeFileQueryError', - 'AnalyticsLogging', - 'Metrics', - 'RetentionPolicy', - 'StaticWebsite', - 'CorsRule', - 'DeletedPathProperties', - 'CustomerProvidedEncryptionKey' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_directory_client.py deleted file mode 100644 index 8a85f38..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_directory_client.py +++ /dev/null @@ -1,589 +0,0 @@ -# ------------------------------------------------------------------------- -# 
Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Any, Dict, Optional, Type, TypeVar, Union, - TYPE_CHECKING) - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore -from azure.core.pipeline import Pipeline -from ._deserialize import deserialize_dir_properties -from ._shared.base_client import TransportWrapper, parse_connection_str -from ._data_lake_file_client import DataLakeFileClient -from ._models import DirectoryProperties, FileProperties -from ._path_client import PathClient - -if TYPE_CHECKING: - from datetime import datetime - -ClassType = TypeVar("ClassType") - - -class DataLakeDirectoryClient(PathClient): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, and account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) 
-> ClassType - """ - Create DataLakeDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: - The name of file system to interact with. - :type file_system_name: str - :param directory_name: - The name of directory to interact with. The directory is under file system. - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of an AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return: a DataLakeDirectoryClient - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, directory_name=directory_name, - credential=credential, **kwargs) - - def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC.
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return self._create('directory', metadata=metadata, **kwargs) - - def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return self._delete(recursive=True, **kwargs) - - def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - Required if the directory was created with a customer-provided key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - return self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._exists(**kwargs) - - def rename_directory(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = DataLakeDirectoryClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, - credential=self._raw_credential or new_dir_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - new_directory_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_directory_client - - def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. 
- :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - subdir.delete_directory(**kwargs) - return subdir - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. eg. directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - """ - try: - file_path = file.get('name') - except AttributeError: - file_path = self.path_name + '/' + str(file) - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The sub subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. 
- :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - try: - subdir_path = sub_directory.get('name') - except AttributeError: - subdir_path = self.path_name + '/' + str(sub_directory) - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_file_client.py deleted file mode 100644 index 1aa9137..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_file_client.py +++ /dev/null @@ -1,837 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Any, AnyStr, Dict, IO, Iterable, Optional, Type, TypeVar, Union, - TYPE_CHECKING) - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - -from azure.core.exceptions import HttpResponseError -from ._quick_query_helper import DataLakeFileQueryReader -from ._shared.base_client import parse_connection_str -from ._shared.request_handlers import get_length, read_length -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import IterStreamer -from ._upload_helper import upload_datalake_file -from ._download import StorageStreamDownloader -from ._path_client import PathClient -from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \ - convert_datetime_to_rfc1123, get_cpk_info -from ._deserialize import process_storage_error, deserialize_file_properties -from ._models import FileProperties, DataLakeFileQueryError - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ContentSettings - -ClassType = TypeVar("ClassType") - - -class DataLakeFileClient(PathClient): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. 
The value can be a SAS token string, - an instance of an AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeFileClient from connection string. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """ - Create DataLakeFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param file_path: The whole file path, used to interact with a specific file. - The file is under the file system. e.g. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of an AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return: a DataLakeFileClient - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, file_path=file_path, - credential=credential, **kwargs) - - def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string.
- :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return self._delete(**kwargs) - - def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - Required if the file was created with a customer-provided key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access - - def set_file_expiry(self, expiry_options, # type: str - expires_on=None, # type: Optional[Union[datetime, int]] - **kwargs): - # type: (...) -> None - """Sets the time a file will expire and be deleted. - - :param str expiry_options: - Required. Indicates mode of the expiry time. 
- Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' - :param datetime or int expires_on: - The time to set the file to expiry. - When expiry_options is RelativeTo*, expires_on should be an int in milliseconds. - If the type of expires_on is datetime, it should be in UTC time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - expires_on = convert_datetime_to_rfc1123(expires_on) - except AttributeError: - expires_on = str(expires_on) - self._datalake_client_for_blob_operation.path \ - .set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access - - def _upload_options( # pylint:disable=too-many-statements - self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Dict[str, Any] - - encoding = kwargs.pop('encoding', 'UTF-8') - if isinstance(data, six.text_type): - data = data.encode(encoding) # type: ignore - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - - validate_content = kwargs.pop('validate_content', False) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - - kwargs['properties'] = add_metadata_headers(metadata) - kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) - kwargs['modified_access_conditions'] = get_mod_conditions(kwargs) - kwargs['cpk_info'] = get_cpk_info(self.scheme, kwargs) - - if content_settings: - kwargs['path_http_headers'] = get_path_http_headers(content_settings) - - kwargs['stream'] = stream - kwargs['length'] = length - kwargs['validate_content'] = validate_content - kwargs['max_concurrency'] = max_concurrency - kwargs['client'] = self._client.path - kwargs['file_settings'] = self._config - - return kwargs - - def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. 
- The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return upload_datalake_file(**options) - - @staticmethod - def _append_data_options( - data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - scheme, # type: str - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - - if isinstance(data, six.text_type): - data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore - if length is None: - length = get_length(data) - if length is None: - length, data = read_length(data) - if isinstance(data, bytes): - data = data[:length] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - cpk_info = get_cpk_info(scheme, kwargs) - - options = { - 'body': data, - 'position': offset, - 'content_length': length, - 'lease_access_conditions': access_conditions, - 'validate_content': kwargs.pop('validate_content', False), - 'cpk_info': cpk_info, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. - """ - options = self._append_data_options( - data=data, - offset=offset, - scheme=self.scheme, - length=length, - **kwargs) - try: - return self._client.path.append_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _flush_data_options( - offset, # type: int - scheme, # type: str - content_settings=None, # type: Optional[ContentSettings] - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - cpk_info = get_cpk_info(scheme, kwargs) - - options = { - 'position': offset, - 'content_length': 0, - 'path_http_headers': path_http_headers, - 'retain_uncommitted_data': retain_uncommitted_data, - 'close': kwargs.pop('close', False), - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) 
-> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. - - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :return: response header in dict - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 8 - :caption: Commit the previous appended data. 
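As a point of reference, the append/flush pattern documented above can be exercised roughly as follows. This is a minimal sketch, not taken from the samples referenced in the docstring: the account URL, file system, path and credential are placeholders, and the import uses the public ``azure.storage.filedatalake`` package these docstrings refer to (the vendored equivalent lives under ``azure.multiapi.storagev2.filedatalake``)::

    from azure.storage.filedatalake import DataLakeFileClient

    # Placeholder endpoint, file system, path and credential.
    file_client = DataLakeFileClient(
        account_url="https://myaccount.dfs.core.windows.net",
        file_system_name="my-file-system",
        file_path="dir/data.txt",
        credential="<sas-token-or-account-key>",
    )

    data = b"hello, datalake"
    file_client.create_file()                                   # start from an empty file
    file_client.append_data(data, offset=0, length=len(data))   # stage the bytes
    file_client.flush_data(len(data))                           # offset equals total length after commit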
- """ - options = self._flush_data_options( - offset, - self.scheme, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return self._client.path.flush_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - Required if the file was created with a Customer-Provided Key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. - """ - downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file exists and returns False otherwise. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: boolean - """ - return self._exists(**kwargs) - - def rename_file(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. 
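For orientation, a hedged sketch of the rename call described above, assuming ``file_client`` currently points at ``dir/old.txt`` in ``my-file-system`` and that the destination stays within the same file system (so no separate SAS token is needed); all names are placeholders::

    # new_name must follow "{filesystem}/{path}"; rename_file returns a
    # client for the destination path.
    renamed_client = file_client.rename_file("my-file-system/dir/new.txt")
    print(renamed_client.path_name)   # "dir/new.txt"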
- """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_file_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_file_sas = self._query_str.strip('?') - - new_file_client = DataLakeFileClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, - credential=self._raw_credential or new_file_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - new_file_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_file_client - - def query_file(self, query_expression, **kwargs): - # type: (str, **Any) -> DataLakeFileQueryReader - """ - Enables users to select/project on datalake file data by providing simple query expressions. - This operations returns a DataLakeFileQueryReader, users need to use readall() or readinto() to get query data. - - :param str query_expression: - Required. a query statement. - eg. Select * from DataLakeStorage - :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error: - A function to be called on any processing errors returned by the service. - :keyword file_format: - Optional. Defines the serialization of the data currently stored in the file. The default is to - treat the file data as CSV data formatted in the default dialect. This can be overridden with - a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum). - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string. - :paramtype file_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect or - ~azure.storage.filedatalake.QuickQueryDialect or str - :keyword output_format: - Optional. Defines the output serialization for the data stream. By default the data will be returned - as it is represented in the file. By providing an output format, - the file data will be reformatted according to that profile. - This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect. - These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string. - :paramtype output_format: - ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect - or list[~azure.storage.filedatalake.ArrowDialect] or ~azure.storage.filedatalake.QuickQueryDialect or str - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - Required if the file was created with a Customer-Provided Key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (DataLakeFileQueryReader) - :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_query.py - :start-after: [START query] - :end-before: [END query] - :language: python - :dedent: 4 - :caption: select/project on datalake file data by providing simple query expressions. - """ - query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage") - blob_quick_query_reader = self._blob_client.query_blob(query_expression, - blob_format=kwargs.pop('file_format', None), - error_cls=DataLakeFileQueryError, - **kwargs) - return DataLakeFileQueryReader(blob_quick_query_reader) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_lease.py deleted file mode 100644 index 86af7a9..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_lease.py +++ /dev/null @@ -1,245 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2021_06_08 import BlobLeaseClient - - -if TYPE_CHECKING: - from datetime import datetime - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(object): # pylint: disable=client-accepts-api-version-keyword - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. 
- :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. - :type client: ~azure.storage.filedatalake.FileSystemClient or - ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - def acquire(self, lease_duration=-1, **kwargs): - # type: (int, Optional[int], **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file. 
Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
- :rtype: int - """ - self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) - - def _update_lease_client_attributes(self): - self.id = self._blob_lease_client.id # type: str - self.last_modified = self._blob_lease_client.last_modified # type: datetime - self.etag = self._blob_lease_client.etag # type: str diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_service_client.py deleted file mode 100644 index c9fd382..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_data_lake_service_client.py +++ /dev/null @@ -1,570 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Optional, Dict, Any, TypeVar - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline - -from azure.multiapi.storagev2.blob.v2021_06_08 import BlobServiceClient -from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str -from ._deserialize import get_datalake_service_properties -from ._file_system_client import FileSystemClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_file_client import DataLakeFileClient -from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode -from ._serialize import convert_dfs_url_to_blob_url, get_api_version -from ._generated import AzureDataLakeStorageRESTAPI - -ClassType = TypeVar("ClassType") - - -class DataLakeServiceClient(StorageAccountHostsMixin): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. 
Setting to an older version may result in reduced feature compatibility. - - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - - _, sas_token = parse_query(parsed_url.query) - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs', - credential=self._raw_credential, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - - self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) #pylint: disable=protected-access - - def __enter__(self): - self._blob_service_client.__enter__() - return self - - def __exit__(self, *args): - self._blob_service_client.close() - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._blob_service_client.close() - - def _format_url(self, hostname): - """Format the endpoint URL according to hostname - """ - formated_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str) - return formated_url - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """ - Create DataLakeServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - :return a DataLakeServiceClient - :rtype ~azure.storage.filedatalake.DataLakeServiceClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_data_lake_service_client_from_conn_str] - :end-before: [END create_data_lake_service_client_from_conn_str] - :language: python - :dedent: 8 - :caption: Creating the DataLakeServiceClient from a connection string. - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls(account_url, credential=credential, **kwargs) - - def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. - """ - delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. - - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool include_deleted: - Specifies that deleted file systems to be returned in the response. This is for file system restore enabled - account. The default value is `False`. - .. versionadded:: 12.3.0 - :keyword bool include_system: - Flag specifying that system filesystems should be included. - .. versionadded:: 12.6.0 - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. 
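To make the listing behaviour above concrete, a small sketch under assumed placeholder values (the connection string and name prefix are hypothetical)::

    from azure.storage.filedatalake import DataLakeServiceClient

    service_client = DataLakeServiceClient.from_connection_string("<connection-string>")

    # Lazily pages through file systems whose names start with "logs-",
    # following continuation tokens returned by the service.
    for fs in service_client.list_file_systems(name_starts_with="logs-", include_metadata=True):
        print(fs.name, fs.metadata)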
- """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - def _rename_file_system(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str name: - The name of the filesystem to rename. - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = self.get_file_system_client(new_name) - return renamed_file_system - - def undelete_file_system(self, name, deleted_version, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Restores soft-deleted filesystem. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.3.0 - This operation was introduced in API version '2019-12-12'. - - :param str name: - Specifies the name of the deleted filesystem to restore. - :param str deleted_version: - Specifies the version of the deleted filesystem to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - new_name = kwargs.pop('new_name', None) - file_system = self.get_file_system_client(new_name or name) - self._blob_service_client.undelete_container( - name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access - return file_system - - def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, - _pipeline=_pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. 
This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def set_service_properties(self, **kwargs): - # type: (**Any) -> None - """Sets the properties of a storage account's Datalake service, including - Azure Storage Analytics. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - :keyword analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging - :keyword hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates. - :type hour_metrics: ~azure.storage.filedatalake.Metrics - :keyword minute_metrics: - The minute metrics settings provide request statistics - for each minute. - :type minute_metrics: ~azure.storage.filedatalake.Metrics - :keyword cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list[~azure.storage.filedatalake.CorsRule] - :keyword str target_version: - Indicates the default version to use for requests if an incoming - request's version is not specified. - :keyword delete_retention_policy: - The delete retention policy specifies whether to retain deleted files/directories. - It also specifies the number of days and versions of file/directory to keep. - :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy - :keyword static_website: - Specifies whether the static website feature is enabled, - and if yes, indicates the index document and 404 error document to use. - :type static_website: ~azure.storage.filedatalake.StaticWebsite - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - return self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access - - def get_service_properties(self, **kwargs): - # type: (**Any) -> Dict[str, Any] - """Gets the properties of a storage account's datalake service, including - Azure Storage Analytics. - - .. 
versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An object containing datalake service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - """ - props = self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access - return get_datalake_service_properties(props) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_deserialize.py deleted file mode 100644 index 0a7c688..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_deserialize.py +++ /dev/null @@ -1,216 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -from typing import ( # pylint: disable=unused-import - TYPE_CHECKING -) -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \ - ResourceNotFoundError, ResourceExistsError -from ._models import FileProperties, DirectoryProperties, LeaseProperties, DeletedPathProperties, StaticWebsite, \ - RetentionPolicy, Metrics, AnalyticsLogging, PathProperties # pylint: disable=protected-access -from ._shared.models import StorageErrorCode - -if TYPE_CHECKING: - pass - -_LOGGER = logging.getLogger(__name__) - - -def deserialize_dir_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - dir_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return dir_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-blob-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_path_properties(path_list): - return [PathProperties._from_generated(path) for path in path_list] # pylint: disable=protected-access - - -def return_headers_and_deserialized_path_list(response, deserialized, response_headers): # pylint: disable=unused-argument - return deserialized.paths if deserialized.paths else {}, normalize_headers(response_headers) - - -def get_deleted_path_properties_from_generated_code(generated): - deleted_path = DeletedPathProperties() - deleted_path.name = generated.name - deleted_path.deleted_time = generated.properties.deleted_time - deleted_path.remaining_retention_days = generated.properties.remaining_retention_days - deleted_path.deletion_id = generated.deletion_id - return deleted_path - - -def is_file_path(_, __, headers): - if headers['x-ms-resource-type'] == "file": - return True - return False - - -def get_datalake_service_properties(datalake_properties): - datalake_properties["analytics_logging"] = AnalyticsLogging._from_generated( # pylint: disable=protected-access - datalake_properties["analytics_logging"]) - datalake_properties["hour_metrics"] = 
Metrics._from_generated(datalake_properties["hour_metrics"]) # pylint: disable=protected-access - datalake_properties["minute_metrics"] = Metrics._from_generated( # pylint: disable=protected-access - datalake_properties["minute_metrics"]) - datalake_properties["delete_retention_policy"] = RetentionPolicy._from_generated( # pylint: disable=protected-access - datalake_properties["delete_retention_policy"]) - datalake_properties["static_website"] = StaticWebsite._from_generated( # pylint: disable=protected-access - datalake_properties["static_website"]) - return datalake_properties - - - def from_blob_properties(blob_properties): - file_props = FileProperties() - file_props.name = blob_properties.name - file_props.etag = blob_properties.etag - file_props.deleted = blob_properties.deleted - file_props.metadata = blob_properties.metadata - file_props.lease = blob_properties.lease - file_props.lease.__class__ = LeaseProperties - file_props.last_modified = blob_properties.last_modified - file_props.creation_time = blob_properties.creation_time - file_props.size = blob_properties.size - file_props.deleted_time = blob_properties.deleted_time - file_props.remaining_retention_days = blob_properties.remaining_retention_days - file_props.content_settings = blob_properties.content_settings - return file_props - - - def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = value - return normalized - - - def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - try: - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - except AttributeError: - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - - def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three, then it has already been serialized by the generated layer. - if isinstance(storage_error, (ResourceNotFoundError, ClientAuthenticationError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a JSON or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error.
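To make the two header helpers above concrete, a small self-contained sketch of the transformations they perform; the header names and values are hypothetical.

raw_headers = {
    "x-ms-meta-category": "test",
    "x-ms-lease-status": "unlocked",
    "Last-Modified": "Tue, 01 Aug 2023 00:00:00 GMT",
}

# normalize_headers strips the 'x-ms-' prefix and snake_cases every key:
#   {'meta_category': 'test', 'lease_status': 'unlocked', 'last_modified': '...'}
normalized = {}
for key, value in raw_headers.items():
    if key.startswith("x-ms-"):
        key = key[5:]
    normalized[key.lower().replace("-", "_")] = value

# deserialize_metadata keeps only the 'x-ms-meta-*' headers and drops the prefix:
#   {'category': 'test'}
metadata = {k[len("x-ms-meta-"):]: v for k, v in raw_headers.items() if k.startswith("x-ms-meta-")}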
- if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.invalid_property_name, - StorageErrorCode.invalid_source_uri, - StorageErrorCode.source_path_not_found, - StorageErrorCode.lease_name_mismatch, - StorageErrorCode.file_system_not_found, - StorageErrorCode.path_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.invalid_destination_path, - StorageErrorCode.invalid_rename_source_path, - StorageErrorCode.lease_is_already_broken, - StorageErrorCode.invalid_source_or_destination_resource_type, - StorageErrorCode.rename_destination_parent_path_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.source_path_is_being_deleted, - StorageErrorCode.path_already_exists, - StorageErrorCode.destination_path_is_being_deleted, - StorageErrorCode.file_system_already_exists, - StorageErrorCode.file_system_being_deleted, - StorageErrorCode.path_conflict]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_download.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_download.py deleted file mode 100644 index 61716d3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_download.py +++ /dev/null @@ -1,59 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import Iterator - -from ._deserialize import from_blob_properties - - -class StorageStreamDownloader(object): - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar ~azure.storage.filedatalake.FileProperties properties: - The properties of the file being downloaded. 
If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - """ - return self._downloader.chunks() - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return self._downloader.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_file_system_client.py deleted file mode 100644 index c51ab9d..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_file_system_client.py +++ /dev/null @@ -1,1002 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines -import functools -from typing import Any, Dict, Optional, Type, TypeVar, Union, TYPE_CHECKING - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore -import six - -from azure.core.pipeline import Pipeline -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged -from azure.multiapi.storagev2.blob.v2021_06_08 import ContainerClient -from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str -from ._serialize import convert_dfs_url_to_blob_url, get_api_version -from ._list_paths_helper import DeletedPathPropertiesPaged, PathPropertiesPaged -from ._models import LocationMode, FileSystemProperties, PublicAccess, DeletedPathProperties, FileProperties, \ - DirectoryProperties -from ._data_lake_file_client import DataLakeFileClient -from ._data_lake_directory_client import DataLakeDirectoryClient -from ._data_lake_lease import DataLakeLeaseClient -from ._generated import AzureDataLakeStorageRESTAPI -from ._generated.models import ListBlobsIncludeItem -from ._deserialize import process_storage_error, is_file_path - -if TYPE_CHECKING: - from datetime import datetime - - -ClassType = TypeVar("ClassType") - - -class FileSystemClient(StorageAccountHostsMixin): - """A client to interact with a specific file system, even if that file system - may not yet exist. 
- - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not file_system_name: - raise ValueError("Please specify a file system name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - # TODO: add self.account_url to base_client and remove _blob_account_url - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._container_client = ContainerClient(blob_account_url, file_system_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, - file_system=file_system_name, pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url, - base_url=self._container_client.url, - file_system=file_system_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - self._query_str) - - def __exit__(self, *args): - self._container_client.close() - super(FileSystemClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._container_client.close() - self.__exit__() - - @classmethod - def from_connection_string( - cls, # type: Type[ClassType] - conn_str, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ClassType - """ - Create FileSystemClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param file_system_name: The name of file system to interact with. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account shared access - key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. 
- :return a FileSystemClient - :rtype ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system_client_from_connection_string] - :end-before: [END create_file_system_client_from_connection_string] - :language: python - :dedent: 8 - :caption: Create FileSystemClient from connection string - """ - account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') - return cls( - account_url, file_system_name=file_system_name, credential=credential, **kwargs) - - def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. 
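A short sketch tying from_connection_string and acquire_lease together; the connection string, file system name, and metadata values are placeholders, and set_file_system_metadata with a lease is used only as an example of lease-guarded work.

from azure.multiapi.storagev2.filedatalake.v2021_06_08 import FileSystemClient

conn_str = "DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<key>;EndpointSuffix=core.windows.net"
file_system_client = FileSystemClient.from_connection_string(conn_str, "my-file-system")

# Hold a 30-second lease while doing work that must not race with other writers.
lease = file_system_client.acquire_lease(lease_duration=30)
try:
    file_system_client.set_file_system_metadata({"category": "maintenance"}, lease=lease)
finally:
    lease.release()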
- - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary of response headers. - :rtype: Dict[str, Union[str, datetime]] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 12 - :caption: Creating a file system in the datalake service. - """ - return self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file system exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._container_client.exists(**kwargs) - - def _rename_file_system(self, new_name, **kwargs): - # type: (str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access - #TODO: self._raw_credential would not work with SAS tokens - renamed_file_system = FileSystemClient( - "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - return renamed_file_system - - def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
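A brief sketch of pairing exists() with create_file_system() as described above, reusing the file_system_client from the earlier sketches.

# create_file_system raises ResourceExistsError if the name is already taken,
# so guard with exists() when the file system may have been provisioned before.
if not file_system_client.exists():
    file_system_client.create_file_system(metadata={"category": "test"})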
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 12 - :caption: Deleting a file system in the datalake service. - """ - self._container_client.delete_container(**kwargs) - - def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 12 - :caption: Getting properties on the file system. - """ - container_properties = self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 12 - :caption: Setting metadata on the file system. - """ - return self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File System-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, the operation only succeeds if the - file system's lease is active and matches this ID. 
- :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_policy = self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> ItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 8 - :caption: List the paths in the file system. - """ - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.file_system.list_paths, - path=path, - timeout=timeout, - **kwargs) - return ItemPaged( - command, recursive, path=path, max_results=max_results, - page_iterator_class=PathPropertiesPaged, **kwargs) - - def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. 
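A sketch of walking the listing that get_paths above returns; the path prefix is a placeholder, and the PathProperties attributes used (name, is_directory) are the usual ones from this package's models.

# Recursively enumerate everything under 'raw/'; paging is handled lazily by ItemPaged.
for path in file_system_client.get_paths(path="raw", recursive=True):
    marker = "<dir> " if path.is_directory else "      "
    print(marker + path.name)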
- When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. 
- :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 8 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - directory_client.delete_directory(**kwargs) - return directory_client - - def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 8 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - file_client.create_file(**kwargs) - return file_client - - def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 8 - :caption: Delete file in the file system. 
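A sketch of the create_file/delete_file round trip described above; append_data and flush_data come from DataLakeFileClient elsewhere in this package and are assumed here, and the path and payload are placeholders.

# create_file returns a DataLakeFileClient for the new (empty) file.
file_client = file_system_client.create_file("my-directory/report.csv")

data = b"col1,col2\n"
file_client.append_data(data, offset=0, length=len(data))
file_client.flush_data(len(data))   # commit the appended bytes

# Later, mark the same path for deletion.
file_system_client.delete_file("my-directory/report.csv")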
- """ - file_client = self.get_file_client(file) - file_client.delete_file(**kwargs) - return file_client - - def _undelete_path_options(self, deleted_path_name, deletion_id): - quoted_path = quote(unquote(deleted_path_name.strip('/'))) - - url_and_token = self.url.replace('.dfs.', '.blob.').split('?') - try: - url = url_and_token[0] + '/' + quoted_path + url_and_token[1] - except IndexError: - url = url_and_token[0] + '/' + quoted_path - - undelete_source = quoted_path + '?deletionid={}'.format(deletion_id) if deletion_id else None - - return quoted_path, url, undelete_source - - def _undelete_path(self, deleted_path_name, deletion_id, **kwargs): - # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient] - """Restores soft-deleted path. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :param str deleted_path_name: - Specifies the path (file or directory) to restore. - :param str deletion_id: - Specifies the version of the deleted path to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.file.datalake.DataLakeDirectoryClient or azure.storage.file.datalake.DataLakeFileClient - """ - _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id) - - pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - path_client = AzureDataLakeStorageRESTAPI( - url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline) - try: - is_file = path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs) - if is_file: - return self.get_file_client(deleted_path_name) - return self.get_directory_client(deleted_path_name) - except HttpResponseError as error: - process_storage_error(error) - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - # TODO: Temporarily removing this for GA release. - # def delete_files(self, *files, **kwargs): - # # type: (...) -> Iterator[HttpResponse] - # """Marks the specified files or empty directories for deletion. - - # The files/empty directories are later deleted during garbage collection. - - # If a delete retention policy is enabled for the service, then this operation soft deletes the - # files/empty directories and retains the files or snapshots for specified number of days. - # After specified number of days, files' data is removed from the service during garbage collection. - # Soft deleted files/empty directories are accessible through :func:`list_deleted_paths()`. - - # :param files: - # The files/empty directories to delete. This can be a single file/empty directory, or multiple values can - # be supplied, where each value is either the name of the file/directory (str) or - # FileProperties/DirectoryProperties. - - # .. note:: - # When the file/dir type is dict, here's a list of keys, value rules. 
- - # blob name: - # key: 'name', value type: str - # if the file modified or not: - # key: 'if_modified_since', 'if_unmodified_since', value type: datetime - # etag: - # key: 'etag', value type: str - # match the etag or not: - # key: 'match_condition', value type: MatchConditions - # lease: - # key: 'lease_id', value type: Union[str, LeaseClient] - # timeout for subrequest: - # key: 'timeout', value type: int - - # :type files: list[str], list[dict], - # or list[Union[~azure.storage.filedatalake.FileProperties, ~azure.storage.filedatalake.DirectoryProperties] - # :keyword ~datetime.datetime if_modified_since: - # A DateTime value. Azure expects the date value passed in to be UTC. - # If timezone is included, any non-UTC datetimes will be converted to UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - # Specify this header to perform the operation only - # if the resource has been modified since the specified time. - # :keyword ~datetime.datetime if_unmodified_since: - # A DateTime value. Azure expects the date value passed in to be UTC. - # If timezone is included, any non-UTC datetimes will be converted to UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - # Specify this header to perform the operation only if - # the resource has not been modified since the specified date/time. - # :keyword bool raise_on_any_failure: - # This is a boolean param which defaults to True. When this is set, an exception - # is raised even if there is a single operation failure. - # :keyword int timeout: - # The timeout parameter is expressed in seconds. - # :return: An iterator of responses, one for each blob in order - # :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] - - # .. admonition:: Example: - - # .. literalinclude:: ../samples/datalake_samples_file_system_async.py - # :start-after: [START batch_delete_files_or_empty_directories] - # :end-before: [END batch_delete_files_or_empty_directories] - # :language: python - # :dedent: 4 - # :caption: Deleting multiple files or empty directories. - # """ - # return self._container_client.delete_blobs(*files, **kwargs) - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. 
- """ - try: - directory_name = directory.get('name') - except AttributeError: - directory_name = str(directory) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file_path.get('name') - except AttributeError: - file_path = str(file_path) - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def list_deleted_paths(self, **kwargs): - # type: (Any) -> ItemPaged[DeletedPathProperties] - """Returns a generator to list the deleted (file or directory) paths under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword str path_prefix: - Filters the results to return only paths under the specified path. - :keyword int results_per_page: - An optional value that specifies the maximum number of items to return per page. - If omitted or greater than 5,000, the response will include up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of DeletedPathProperties. 
- :rtype: - ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.DeletedPathProperties] - """ - path_prefix = kwargs.pop('path_prefix', None) - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment, - showonly=ListBlobsIncludeItem.deleted, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged, - results_per_page=results_per_page, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/__init__.py deleted file mode 100644 index b0b85e1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI -__all__ = ['AzureDataLakeStorageRESTAPI'] - -# `._patch.py` is used for handwritten extensions to the generated code -# Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -from ._patch import patch_sdk -patch_sdk() diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_azure_data_lake_storage_restapi.py deleted file mode 100644 index d452235..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_azure_data_lake_storage_restapi.py +++ /dev/null @@ -1,105 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import TYPE_CHECKING - -from msrest import Deserializer, Serializer - -from azure.core import PipelineClient - -from . import models -from ._configuration import AzureDataLakeStorageRESTAPIConfiguration -from .operations import FileSystemOperations, PathOperations, ServiceOperations - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.rest import HttpRequest, HttpResponse - -class AzureDataLakeStorageRESTAPI(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.filedatalake.operations.ServiceOperations - :ivar file_system: FileSystemOperations operations - :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations - :ivar path: PathOperations operations - :vartype path: azure.storage.filedatalake.operations.PathOperations - :param url: The URL of the service account, container, or blob that is the target of the - desired operation. - :type url: str - :param base_url: Service URL. Default value is "". - :type base_url: str - :keyword resource: The value must be "filesystem" for all filesystem operations. Default value - is "filesystem". Note that overriding this default value may result in unsupported behavior. - :paramtype resource: str - :keyword version: Specifies the version of the operation to use for this request. Default value - is "2021-06-08". Note that overriding this default value may result in unsupported behavior. - :paramtype version: str - """ - - def __init__( - self, - url, # type: str - base_url="", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - self._config = AzureDataLakeStorageRESTAPIConfiguration(url=url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations(self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations(self._client, self._config, self._serialize, self._deserialize) - - - def _send_request( - self, - request, # type: HttpRequest - **kwargs # type: Any - ): - # type: (...) -> HttpResponse - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - request_copy.url = self._client.format_url(request_copy.url) - return self._client.send_request(request_copy, **kwargs) - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureDataLakeStorageRESTAPI - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_configuration.py deleted file mode 100644 index c713e81..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_configuration.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureDataLakeStorageRESTAPIConfiguration(Configuration): # pylint: disable=too-many-instance-attributes - """Configuration for AzureDataLakeStorageRESTAPI. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the - desired operation. - :type url: str - :keyword resource: The value must be "filesystem" for all filesystem operations. Default value - is "filesystem". Note that overriding this default value may result in unsupported behavior. - :paramtype resource: str - :keyword version: Specifies the version of the operation to use for this request. Default value - is "2021-06-08". Note that overriding this default value may result in unsupported behavior. - :paramtype version: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs) - resource = kwargs.pop('resource', "filesystem") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - self.url = url - self.resource = resource - self.version = version - kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) -> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_patch.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_patch.py deleted file mode 100644 index 74e48ec..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_patch.py +++ /dev/null @@ -1,31 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - -# This file is used for handwritten extensions to the generated code. Example: -# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -def patch_sdk(): - pass \ No newline at end of file diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_vendor.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_vendor.py deleted file mode 100644 index 138f663..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/_vendor.py +++ /dev/null @@ -1,27 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.pipeline.transport import HttpRequest - -def _convert_request(request, files=None): - data = request.content if not files else None - request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) - if files: - request.set_formdata_body(files) - return request - -def _format_url_section(template, **kwargs): - components = template.split("/") - while components: - try: - return template.format(**kwargs) - except KeyError as key: - formatted_components = template.split("/") - components = [ - c for c in formatted_components if "{}".format(key.args[0]) not in c - ] - template = "/".join(components) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/__init__.py deleted file mode 100644 index b0b85e1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI -__all__ = ['AzureDataLakeStorageRESTAPI'] - -# `._patch.py` is used for handwritten extensions to the generated code -# Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -from ._patch import patch_sdk -patch_sdk() diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_azure_data_lake_storage_restapi.py deleted file mode 100644 index 34cb9a7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_azure_data_lake_storage_restapi.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, Awaitable - -from msrest import Deserializer, Serializer - -from azure.core import AsyncPipelineClient -from azure.core.rest import AsyncHttpResponse, HttpRequest - -from .. import models -from ._configuration import AzureDataLakeStorageRESTAPIConfiguration -from .operations import FileSystemOperations, PathOperations, ServiceOperations - -class AzureDataLakeStorageRESTAPI: - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations - :ivar file_system: FileSystemOperations operations - :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations - :ivar path: PathOperations operations - :vartype path: azure.storage.filedatalake.aio.operations.PathOperations - :param url: The URL of the service account, container, or blob that is the target of the - desired operation. - :type url: str - :param base_url: Service URL. Default value is "". - :type base_url: str - :keyword resource: The value must be "filesystem" for all filesystem operations. Default value - is "filesystem". Note that overriding this default value may result in unsupported behavior. - :paramtype resource: str - :keyword version: Specifies the version of the operation to use for this request. Default value - is "2021-06-08". Note that overriding this default value may result in unsupported behavior. 
- :paramtype version: str - """ - - def __init__( - self, - url: str, - base_url: str = "", - **kwargs: Any - ) -> None: - self._config = AzureDataLakeStorageRESTAPIConfiguration(url=url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations(self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations(self._client, self._config, self._serialize, self._deserialize) - - - def _send_request( - self, - request: HttpRequest, - **kwargs: Any - ) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.rest.AsyncHttpResponse - """ - - request_copy = deepcopy(request) - request_copy.url = self._client.format_url(request_copy.url) - return self._client.send_request(request_copy, **kwargs) - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureDataLakeStorageRESTAPI": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_configuration.py deleted file mode 100644 index aa111fe..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_configuration.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureDataLakeStorageRESTAPIConfiguration(Configuration): # pylint: disable=too-many-instance-attributes - """Configuration for AzureDataLakeStorageRESTAPI. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, container, or blob that is the target of the - desired operation. - :type url: str - :keyword resource: The value must be "filesystem" for all filesystem operations. Default value - is "filesystem". 
Note that overriding this default value may result in unsupported behavior. - :paramtype resource: str - :keyword version: Specifies the version of the operation to use for this request. Default value - is "2021-06-08". Note that overriding this default value may result in unsupported behavior. - :paramtype version: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs) - resource = kwargs.pop('resource', "filesystem") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - self.url = url - self.resource = resource - self.version = version - kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_patch.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_patch.py deleted file mode 100644 index 74e48ec..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/_patch.py +++ /dev/null @@ -1,31 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - -# This file is used for handwritten extensions to the generated code. 
Example: -# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -def patch_sdk(): - pass \ No newline at end of file diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/__init__.py deleted file mode 100644 index 0db71e0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_file_system_operations.py deleted file mode 100644 index 1f25cef..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_file_system_operations.py +++ /dev/null @@ -1,593 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, List, Optional, TypeVar, Union - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse -from azure.core.rest import HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async - -from ... import models as _models -from ..._vendor import _convert_request -from ...operations._file_system_operations import build_create_request, build_delete_request, build_get_properties_request, build_list_blob_hierarchy_segment_request, build_list_paths_request, build_set_properties_request -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class FileSystemOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s - :attr:`file_system` attribute. 
- """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - args = list(args) - self._client = args.pop(0) if args else kwargs.pop("client") - self._config = args.pop(0) if args else kwargs.pop("config") - self._serialize = args.pop(0) if args else kwargs.pop("serializer") - self._deserialize = args.pop(0) if args else kwargs.pop("deserializer") - - - @distributed_trace_async - async def create( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - properties: Optional[str] = None, - **kwargs: Any - ) -> None: - """Create FileSystem. - - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the - operation fails. This operation does not support conditional HTTP requests. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. Default value is - None. 
- :type properties: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - - request = build_create_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - properties=properties, - template_url=self.create.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace_async - async def set_properties( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - properties: Optional[str] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional HTTP requests. For - more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. 
To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. Default value is - None. - :type properties: str - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_set_properties_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - properties=properties, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.set_properties.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace_async - async def get_properties( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the response headers. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - - request = build_get_properties_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - template_url=self.get_properties.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace_async - async def delete( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Delete FileSystem. - - Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same - identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, - attempts to create a filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information indicating that the - filesystem is being deleted. All other operations, including operations on any files or - directories within the filesystem, will fail with status code 404 (Not Found) while the - filesystem is being deleted. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_delete_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.delete.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace_async - async def list_paths( - self, - recursive: bool, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - continuation: Optional[str] = None, - path: Optional[str] = None, - max_results: Optional[int] = None, - upn: Optional[bool] = None, - **kwargs: Any - ) -> "_models.PathList": - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required. - :type recursive: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. 
When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param path: Optional. Filters results to paths within the specified directory. An error - occurs if the directory does not exist. Default value is None. - :type path: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. Default value is - None. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PathList, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.PathList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - - request = build_list_paths_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - recursive=recursive, - request_id_parameter=request_id_parameter, - timeout=timeout, - continuation=continuation, - path=path, - max_results=max_results, - upn=upn, - template_url=self.list_paths.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - - deserialized = self._deserialize('PathList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - list_paths.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace_async - async def list_blob_hierarchy_segment( - self, - prefix: Optional[str] = None, - delimiter: Optional[str] = None, - marker: Optional[str] = None, - 
max_results: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, - showonly: Optional[str] = "deleted", - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListBlobsHierarchySegmentResponse": - """The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters results to filesystems within the specified prefix. Default value is - None. - :type prefix: str - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. Default value is None. - :type delimiter: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. Default value is None. - :type marker: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. Default value is - None. - :type max_results: int - :param include: Include this parameter to specify one or more datasets to include in the - response. Default value is None. - :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem] - :param showonly: Include this parameter to specify one or more datasets to include in the - response. Possible values are "deleted" or None. Default value is "deleted". - :type showonly: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :keyword restype: restype. Default value is "container". Note that overriding this default - value may result in unsupported behavior. - :paramtype restype: str - :keyword comp: comp. Default value is "list". Note that overriding this default value may - result in unsupported behavior. 
- :paramtype comp: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - restype = kwargs.pop('restype', "container") # type: str - comp = kwargs.pop('comp', "list") # type: str - - - request = build_list_blob_hierarchy_segment_request( - url=self._config.url, - restype=restype, - comp=comp, - version=self._config.version, - prefix=prefix, - delimiter=delimiter, - marker=marker, - max_results=max_results, - include=include, - showonly=showonly, - timeout=timeout, - request_id_parameter=request_id_parameter, - template_url=self.list_blob_hierarchy_segment.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - list_blob_hierarchy_segment.metadata = {'url': "{url}/{filesystem}"} # type: ignore - diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_path_operations.py deleted file mode 100644 index 41e832f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_path_operations.py +++ /dev/null @@ -1,1750 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse -from azure.core.rest import HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async - -from ... import models as _models -from ..._vendor import _convert_request -from ...operations._path_operations import build_append_data_request, build_create_request, build_delete_request, build_flush_data_request, build_get_properties_request, build_lease_request, build_read_request, build_set_access_control_recursive_request, build_set_access_control_request, build_set_expiry_request, build_undelete_request, build_update_request -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class PathOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s - :attr:`path` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - args = list(args) - self._client = args.pop(0) if args else kwargs.pop("client") - self._config = args.pop(0) if args else kwargs.pop("config") - self._serialize = args.pop(0) if args else kwargs.pop("serializer") - self._deserialize = args.pop(0) if args else kwargs.pop("deserializer") - - - @distributed_trace_async - async def create( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - resource: Optional[Union[str, "_models.PathResourceType"]] = None, - continuation: Optional[str] = None, - mode: Optional[Union[str, "_models.PathRenameMode"]] = None, - rename_source: Optional[str] = None, - source_lease_id: Optional[str] = None, - properties: Optional[str] = None, - permissions: Optional[str] = None, - umask: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - **kwargs: Any - ) -> None: - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is overwritten and if the - destination already exists and has a lease the lease is broken. This operation supports - conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob - Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. 
- :type timeout: int - :param resource: Required only for Create File and Create Directory. The value must be "file" - or "directory". Default value is None. - :type resource: str or ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This parameter determines the - behavior of the rename operation. The value must be "legacy" or "posix", and the default value - will be "posix". Default value is None. - :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. The value must have the - following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties - will overwrite the existing properties; otherwise, the existing properties will be preserved. - This value must be a URL percent-encoded string. Note that the string may only contain ASCII - characters in the ISO-8859-1 character set. Default value is None. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. Default value is None. - :type source_lease_id: str - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. Default value is - None. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - Default value is None. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, the umask - restricts the permissions of the file or directory to be created. The resulting permission is - given by p bitwise and not u, where p is the permission and u is the umask. For example, if p - is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 - for a directory and 0666 for a file. The default umask is 0027. The umask must be specified - in 4-digit octal notation (e.g. 0766). Default value is None. - :type umask: str - :param path_http_headers: Parameter group. Default value is None. 
- :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. Default value is None. - :type source_modified_access_conditions: - ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :param cpk_info: Parameter group. Default value is None. - :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_encoding = None - _content_language = None - _content_disposition = None - _content_type_parameter = None - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _encryption_key = None - _encryption_key_sha256 = None - encryption_algorithm = None - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - _content_disposition = path_http_headers.content_disposition - _content_type_parameter = path_http_headers.content_type - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if source_modified_access_conditions is not None: - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = cpk_info.encryption_algorithm - - request = build_create_request( - url=self._config.url, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - resource=resource, - continuation=continuation, - mode=mode, - cache_control=_cache_control, - content_encoding=_content_encoding, - content_language=_content_language, - content_disposition=_content_disposition, - content_type_parameter=_content_type_parameter, - rename_source=rename_source, - lease_id=_lease_id, - source_lease_id=source_lease_id, - properties=properties, - permissions=permissions, - umask=umask, - if_match=_if_match, - if_none_match=_if_none_match, - 
if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - source_if_match=_source_if_match, - source_if_none_match=_source_if_none_match, - source_if_modified_since=_source_if_modified_since, - source_if_unmodified_since=_source_if_unmodified_since, - encryption_key=_encryption_key, - encryption_key_sha256=_encryption_key_sha256, - encryption_algorithm=encryption_algorithm, - template_url=self.create.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def update( - self, - action: Union[str, "_models.PathUpdateAction"], - mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], - body: IO, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - max_records: Optional[int] = None, - continuation: Optional[str] = None, - force_flag: Optional[bool] = None, - position: Optional[int] = None, - retain_uncommitted_data: Optional[bool] = None, - close: Optional[bool] = None, - content_length: Optional[int] = None, - properties: Optional[str] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - acl: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> Optional["_models.SetAccessControlRecursiveResponse"]: - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, - sets properties for a file or directory, or sets access control for a file or directory. Data - can only be appended to a file. Concurrent writes to the same file using multiple clients are - not supported. This operation supports conditional HTTP requests. 
For more information, see - `Specifying Conditional Headers for Blob Service Operations - `_. - - :param action: The action must be "append" to upload data to be appended to a file, "flush" to - flush previously uploaded data to a file, "setProperties" to set the properties of a file or - directory, "setAccessControl" to set the owner, group, permissions, or access control list for - a file or directory, or "setAccessControlRecursive" to set the access control list for a - directory recursively. Note that Hierarchical Namespace must be enabled for the account in - order to use access control. Also note that the Access Control List (ACL) includes permissions - for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers - are mutually exclusive. - :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param body: Initial data. - :type body: IO - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the - maximum number of files or directories on which the acl change will be applied. If omitted or - greater than 2,000, the request will process up to 2,000 items. Default value is None. - :type max_records: int - :param continuation: Optional. The number of paths processed with each invocation is limited. - If the number of paths to be processed exceeds this limit, a continuation token is returned in - the response header x-ms-continuation. When a continuation token is returned in the response, - it must be percent-encoded and specified in a subsequent invocation of - setAccessControlRecursive operation. Default value is None. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. 
To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. Default value - is None. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". Default value is - None. - :type content_length: long - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. Default value is - None. - :type properties: str - :param owner: Optional. The owner of the blob or directory. Default value is None. - :type owner: str - :param group: Optional. The owning group of the blob or directory. Default value is None. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - Default value is None. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". 
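Editor's note: the position/content_length rules described above (append each chunk at the current end-of-file offset with content_length equal to the chunk size, then flush with position equal to the total length and content_length 0) reduce to simple offset bookkeeping. A small illustration of the values a caller would pass, independent of any client::

    chunks = [b"hello ", b"data ", b"lake"]

    offset = 0
    append_calls = []
    for chunk in chunks:
        # Each append uploads `chunk` at the current end of the file.
        append_calls.append({"position": offset, "content_length": len(chunk)})
        offset += len(chunk)

    # The final flush commits everything written so far: position is the total
    # length of the file and content_length must be 0 (no request body).
    flush_call = {"position": offset, "content_length": 0}

    assert flush_call["position"] == sum(len(c) for c in chunks)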
Default value is None. - :type acl: str - :param path_http_headers: Parameter group. Default value is None. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - content_type = kwargs.pop('content_type', "application/octet-stream") # type: Optional[str] - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type_parameter = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_type_parameter = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _content = body - - request = build_update_request( - url=self._config.url, - version=self._config.version, - content_type=content_type, - content=_content, - action=action, - mode=mode, - request_id_parameter=request_id_parameter, - timeout=timeout, - max_records=max_records, - continuation=continuation, - force_flag=force_flag, - position=position, - retain_uncommitted_data=retain_uncommitted_data, - close=close, - content_length=content_length, - content_md5=_content_md5, - lease_id=_lease_id, - cache_control=_cache_control, - content_type_parameter=_content_type_parameter, - content_disposition=_content_disposition, - content_encoding=_content_encoding, - content_language=_content_language, - properties=properties, - owner=owner, - group=group, - permissions=permissions, - acl=acl, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.update.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not 
in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if response.status_code == 202: - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - update.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def lease( # pylint: disable=inconsistent-return-statements - self, - x_ms_lease_action: Union[str, "_models.PathLeaseAction"], - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - x_ms_lease_duration: Optional[int] = None, - x_ms_lease_break_period: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Lease Path. - - Create and manage a lease to restrict write and delete access to the path. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations - `_. 
- - :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", - and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" - to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the - lease break period is allowed to elapse, during which time no lease operation except break and - release can be performed on the file. When a lease is successfully broken, the response - indicates the interval in seconds until a new lease can be acquired. Use "change" and specify - the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to - change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. - :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies - the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or - -1 for infinite lease. Default value is None. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, - and specifies the break period of the lease in seconds. The lease break duration must be - between 0 and 60 seconds. Default value is None. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. Default value is None. - :type proposed_lease_id: str - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. 
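Editor's note: the lease actions listed above translate directly into calls on this operation. A hedged sketch of acquire/release, assuming `client` is the generated async client described earlier and `models` is the generated models package (`LeaseAccessConditions` is the parameter group whose `lease_id` this operation reads)::

    import uuid

    async def with_path_lease(client, models):
        proposed = str(uuid.uuid4())
        # Acquire an infinite lease (duration -1; finite leases must be 15-60 seconds).
        await client.path.lease("acquire",
                                x_ms_lease_duration=-1,
                                proposed_lease_id=proposed)
        try:
            ...  # work against the path while holding the lease
        finally:
            # Release requires the active lease id, passed via LeaseAccessConditions.
            await client.path.lease(
                "release",
                lease_access_conditions=models.LeaseAccessConditions(lease_id=proposed))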
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_lease_request( - url=self._config.url, - version=self._config.version, - x_ms_lease_action=x_ms_lease_action, - request_id_parameter=request_id_parameter, - timeout=timeout, - x_ms_lease_duration=x_ms_lease_duration, - x_ms_lease_break_period=x_ms_lease_break_period, - lease_id=_lease_id, - proposed_lease_id=proposed_lease_id, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.lease.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - - if response.status_code == 201: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - - if response.status_code == 202: - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - lease.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def read( - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - x_ms_range_get_content_md5: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - **kwargs: Any - ) -> IO: - """Read File. - - Read the contents of a file. For read operations, range requests are supported. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param range: The HTTP Range request header specifies one or more byte ranges of the resource - to be retrieved. Default value is None. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified - together with the Range header, the service returns the MD5 hash for the range, as long as the - range is less than or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this header is set to true when - the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). Default - value is None. - :type x_ms_range_get_content_md5: bool - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param cpk_info: Parameter group. Default value is None. 
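Editor's note: read streams the body (note the stream_download call below) and only returns a per-range MD5 when the requested range is at most 4 MB, so a typical ranged read looks like the sketch below. `client` is the generated async client as before; the exact shape of the azure-core download stream is an assumption, so treat the chunk iteration as illustrative::

    async def read_first_mib(client):
        # Request the first 1 MiB and ask the service to include an MD5 for the range.
        downloader = await client.path.read(
            range="bytes=0-1048575",
            x_ms_range_get_content_md5=True)
        # Assumption: the returned download stream is async-iterable over byte chunks.
        chunks = [chunk async for chunk in downloader]
        return b"".join(chunks)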
- :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _encryption_key = None - _encryption_key_sha256 = None - encryption_algorithm = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = cpk_info.encryption_algorithm - - request = build_read_request( - url=self._config.url, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - range=range, - lease_id=_lease_id, - x_ms_range_get_content_md5=x_ms_range_get_content_md5, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - encryption_key=_encryption_key, - encryption_key_sha256=_encryption_key_sha256, - encryption_algorithm=encryption_algorithm, - template_url=self.read.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=True, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - deserialized = 
response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - read.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def get_properties( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - action: Optional[Union[str, "_models.PathGetPropertiesAction"]] = None, - upn: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a path. Get Status returns - all system defined properties for a path. Get Access Control List returns the access control - list for a path. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param action: Optional. If the value is "getStatus" only the system defined properties for the - path are returned. If the value is "getAccessControl" the access control list is returned in - the response headers (Hierarchical Namespace must be enabled for the account), otherwise the - properties are returned. Default value is None. - :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. 
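Editor's note: get_properties returns None and surfaces everything through response headers (x-ms-owner, x-ms-group, x-ms-permissions, x-ms-acl, deserialized below), so the documented `cls` hook is the natural way to read them. A sketch, with the header names taken from the deserialization code below and `client` as before::

    async def get_acl(client):
        def capture_headers(pipeline_response, deserialized, response_headers):
            # cls receives (pipeline_response, deserialized, response_headers);
            # returning the headers makes them the operation's return value.
            return response_headers

        headers = await client.path.get_properties(
            action="getAccessControl",   # requires Hierarchical Namespace, per the docstring
            upn=True,                    # translate AAD object IDs to user principal names
            cls=capture_headers)
        return headers["x-ms-owner"], headers["x-ms-group"], headers["x-ms-acl"]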
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_get_properties_request( - url=self._config.url, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - action=action, - upn=upn, - lease_id=_lease_id, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.get_properties.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - 
response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def delete( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - recursive: Optional[bool] = None, - continuation: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param recursive: Required. Default value is None. - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. 
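Editor's note: when deleting a large directory the service may return an x-ms-continuation header (see the response handling below), and the docstring above says the token must be passed back into the next delete call. A hedged sketch of that loop, again using `cls` to read the header, with `client` as before::

    async def delete_directory(client):
        def capture_headers(pipeline_response, deserialized, response_headers):
            return response_headers

        continuation = None
        while True:
            headers = await client.path.delete(
                recursive=True,
                continuation=continuation,
                cls=capture_headers)
            continuation = headers.get("x-ms-continuation")
            if not continuation:
                break   # no token means the directory has been fully deleted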
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_delete_request( - url=self._config.url, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - recursive=recursive, - continuation=continuation, - lease_id=_lease_id, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.delete.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def set_access_control( # pylint: disable=inconsistent-return-statements - self, - timeout: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. Default value is None. 
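Editor's note: as the update docstring earlier points out, x-ms-permissions and x-ms-acl are mutually exclusive, so set_access_control is called with either `permissions` (symbolic or 4-digit octal) or `acl` (a comma-separated list of "[scope:][type]:[id]:[permissions]" entries), never both. Two hedged examples with `client` as before; the object IDs and the exact ACE strings are placeholders::

    async def set_ownership_and_mode(client, owner_object_id):
        # Octal form: "0750" -> rwxr-x---; the symbolic form "rwxr-x---" is also accepted.
        await client.path.set_access_control(owner=owner_object_id,
                                             permissions="0750")

    async def set_explicit_acl(client, user_object_id):
        # Grant a specific AAD user read/execute via an ACL entry instead of permissions.
        await client.path.set_access_control(
            acl="user::rwx,group::r-x,other::---,user:{}:r-x".format(user_object_id))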
- :type owner: str - :param group: Optional. The owning group of the blob or directory. Default value is None. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - Default value is None. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". Default value is None. - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword action: action. Default value is "setAccessControl". Note that overriding this default - value may result in unsupported behavior. - :paramtype action: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - action = kwargs.pop('action', "setAccessControl") # type: str - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_set_access_control_request( - url=self._config.url, - action=action, - version=self._config.version, - timeout=timeout, - lease_id=_lease_id, - owner=owner, - group=group, - permissions=permissions, - acl=acl, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - request_id_parameter=request_id_parameter, - template_url=self.set_access_control.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def set_access_control_recursive( - self, - mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], - timeout: Optional[int] = None, - continuation: Optional[str] = None, - force_flag: Optional[bool] = None, - max_records: Optional[int] = None, - acl: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> "_models.SetAccessControlRecursiveResponse": - """Set the access control list for a path and sub-paths. - - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files or directories on which - the acl change will be applied. If omitted or greater than 2,000, the request will process up - to 2,000 items. Default value is None. - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". Default value is None. 
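Editor's note: set_access_control_recursive is chunked the same way (max_records caps each pass at 2,000 items, and progress comes back both in the SetAccessControlRecursiveResponse body and the x-ms-continuation header), so callers typically loop until the token runs out. A sketch using `cls` to capture both the body and the headers, with `client` as before and `acl_spec` a caller-supplied ACL string in the documented format::

    async def modify_acl_recursively(client, acl_spec):
        def body_and_headers(pipeline_response, deserialized, response_headers):
            return deserialized, response_headers

        continuation = None
        while True:
            result, headers = await client.path.set_access_control_recursive(
                mode="modify",          # "set" and "remove" are the other documented modes
                acl=acl_spec,
                force_flag=True,        # keep going past per-entity 4XX failures
                max_records=2000,
                continuation=continuation,
                cls=body_and_headers)
            continuation = headers.get("x-ms-continuation")
            if not continuation:
                return result           # result of the final batch only, for brevity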
- :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :keyword action: action. Default value is "setAccessControlRecursive". Note that overriding - this default value may result in unsupported behavior. - :paramtype action: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - action = kwargs.pop('action', "setAccessControlRecursive") # type: str - - - request = build_set_access_control_recursive_request( - url=self._config.url, - action=action, - version=self._config.version, - mode=mode, - timeout=timeout, - continuation=continuation, - force_flag=force_flag, - max_records=max_records, - acl=acl, - request_id_parameter=request_id_parameter, - template_url=self.set_access_control_recursive.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - set_access_control_recursive.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def flush_data( # pylint: disable=inconsistent-return-statements - self, - timeout: Optional[int] = None, - position: Optional[int] = None, - retain_uncommitted_data: Optional[bool] = None, - close: Optional[bool] = None, - content_length: Optional[int] = None, - request_id_parameter: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - **kwargs: Any - ) -> None: - """Set the owner, group, permissions, or access control list for a path. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. Default value - is None. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". Default value is - None. - :type content_length: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param path_http_headers: Parameter group. Default value is None. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param cpk_info: Parameter group. Default value is None. - :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo - :keyword action: action. Default value is "flush". Note that overriding this default value may - result in unsupported behavior. 
- :paramtype action: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - action = kwargs.pop('action', "flush") # type: str - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type_parameter = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _encryption_key = None - _encryption_key_sha256 = None - encryption_algorithm = None - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_type_parameter = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = cpk_info.encryption_algorithm - - request = build_flush_data_request( - url=self._config.url, - action=action, - version=self._config.version, - timeout=timeout, - position=position, - retain_uncommitted_data=retain_uncommitted_data, - close=close, - content_length=content_length, - content_md5=_content_md5, - lease_id=_lease_id, - cache_control=_cache_control, - content_type_parameter=_content_type_parameter, - content_disposition=_content_disposition, - content_encoding=_content_encoding, - content_language=_content_language, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - request_id_parameter=request_id_parameter, - encryption_key=_encryption_key, - encryption_key_sha256=_encryption_key_sha256, - encryption_algorithm=encryption_algorithm, - template_url=self.flush_data.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - flush_data.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def append_data( # pylint: disable=inconsistent-return-statements - self, - body: IO, - position: Optional[int] = None, - timeout: Optional[int] = None, - content_length: Optional[int] = None, - transactional_content_crc64: Optional[bytearray] = None, - request_id_parameter: Optional[str] = None, - path_http_headers: Optional["_models.PathHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - cpk_info: Optional["_models.CpkInfo"] = None, - **kwargs: Any - ) -> None: - """Append data to the file. - - :param body: Initial data. - :type body: IO - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. Default value - is None. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". Default value is - None. - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. Default value is None. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param path_http_headers: Parameter group. Default value is None. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param cpk_info: Parameter group. Default value is None. - :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo - :keyword action: action. Default value is "append". 
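Taken together, the ``append_data`` and ``flush_data`` descriptions define the upload contract: each append carries ``content_length`` equal to its body size, while the flush carries ``content_length=0``, a ``position`` equal to the final file length, and optionally ``close=True`` to mark the final change event. A hedged sketch, assuming an already-constructed async ``path_ops`` client and a small in-memory payload:

.. code-block:: python

    import io

    # Editor's illustration of the documented append -> flush sequence.
    async def upload_small_file(path_ops, payload: bytes):
        # Append the whole payload at offset 0; content_length must equal the body size.
        await path_ops.append_data(
            body=io.BytesIO(payload),
            position=0,
            content_length=len(payload),
        )
        # Flush with no body: position == final file length, content_length == 0.
        await path_ops.flush_data(
            position=len(payload),
            content_length=0,
            close=True,   # signal the final change for Azure Storage Events
        )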
Note that overriding this default value may - result in unsupported behavior. - :paramtype action: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - action = kwargs.pop('action', "append") # type: str - content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] - - _transactional_content_hash = None - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - encryption_algorithm = None - if path_http_headers is not None: - _transactional_content_hash = path_http_headers.transactional_content_hash - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = cpk_info.encryption_algorithm - _content = body - - request = build_append_data_request( - url=self._config.url, - action=action, - version=self._config.version, - content_type=content_type, - content=_content, - position=position, - timeout=timeout, - content_length=content_length, - transactional_content_hash=_transactional_content_hash, - transactional_content_crc64=transactional_content_crc64, - lease_id=_lease_id, - request_id_parameter=request_id_parameter, - encryption_key=_encryption_key, - encryption_key_sha256=_encryption_key_sha256, - encryption_algorithm=encryption_algorithm, - template_url=self.append_data.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - append_data.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def 
set_expiry( # pylint: disable=inconsistent-return-statements - self, - expiry_options: Union[str, "_models.PathExpiryOptions"], - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - expires_on: Optional[str] = None, - **kwargs: Any - ) -> None: - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. Default value is None. - :type expires_on: str - :keyword comp: comp. Default value is "expiry". Note that overriding this default value may - result in unsupported behavior. - :paramtype comp: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - comp = kwargs.pop('comp', "expiry") # type: str - - - request = build_set_expiry_request( - url=self._config.url, - comp=comp, - version=self._config.version, - expiry_options=expiry_options, - timeout=timeout, - request_id_parameter=request_id_parameter, - expires_on=expires_on, - template_url=self.set_expiry.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace_async - async def undelete( # pylint: disable=inconsistent-return-statements - self, - timeout: Optional[int] = None, - undelete_source: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """Undelete a path that was previously soft 
deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of - the soft deleted blob to undelete. Default value is None. - :type undelete_source: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :keyword comp: comp. Default value is "undelete". Note that overriding this default value may - result in unsupported behavior. - :paramtype comp: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - comp = kwargs.pop('comp', "undelete") # type: str - - - request = build_undelete_request( - url=self._config.url, - comp=comp, - version=self._config.version, - timeout=timeout, - undelete_source=undelete_source, - request_id_parameter=request_id_parameter, - template_url=self.undelete.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_service_operations.py deleted file mode 100644 index 88e25d4..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,159 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. 
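The ``set_expiry`` and ``undelete`` operations above take only scalar parameters, so a call site is short. A sketch, assuming an async ``path_ops`` client already scoped to the target path; ``deleted_path`` and the choice of ``"NeverExpire"`` are illustrative, since the string format that ``expires_on`` expects for the relative and absolute modes is not documented in this excerpt.

.. code-block:: python

    # Editor's illustration: restore a soft-deleted path, then clear any expiry.
    async def restore_path(path_ops, deleted_path: str):
        # Hierarchical-namespace accounts only: point at the soft-deleted source.
        await path_ops.undelete(undelete_source=deleted_path)
        # "NeverExpire" needs no expires_on value.
        await path_ops.set_expiry(expiry_options="NeverExpire")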
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse -from azure.core.rest import HttpRequest -from azure.core.tracing.decorator import distributed_trace - -from ... import models as _models -from ..._vendor import _convert_request -from ...operations._service_operations import build_list_file_systems_request -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.filedatalake.aio.AzureDataLakeStorageRESTAPI`'s - :attr:`service` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - args = list(args) - self._client = args.pop(0) if args else kwargs.pop("client") - self._config = args.pop(0) if args else kwargs.pop("config") - self._serialize = args.pop(0) if args else kwargs.pop("serializer") - self._deserialize = args.pop(0) if args else kwargs.pop("deserializer") - - - @distributed_trace - def list_file_systems( - self, - prefix: Optional[str] = None, - continuation: Optional[str] = None, - max_results: Optional[int] = None, - request_id_parameter: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs: Any - ) -> AsyncIterable["_models.FileSystemList"]: - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified prefix. Default value is - None. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. Default value is - None. - :type max_results: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :keyword resource: The value must be "account" for all account operations. Default value is - "account". Note that overriding this default value may result in unsupported behavior. 
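``list_file_systems`` returns an ``AsyncItemPaged`` that yields ``FileSystem`` items and follows continuation tokens internally. A minimal sketch; ``client`` is assumed to be an already-authenticated async ``AzureDataLakeStorageRESTAPI`` instance and the prefix is illustrative.

.. code-block:: python

    # Editor's illustration: page through file systems whose names start with "logs".
    async def print_file_systems(client, prefix: str = "logs"):
        async for fs in client.service.list_file_systems(prefix=prefix, max_results=100):
            # FileSystem carries name, last_modified and e_tag (see the models later in this diff).
            print(fs.name, fs.last_modified)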
- :paramtype resource: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either FileSystemList or the result of cls(response) - :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystemList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - resource = kwargs.pop('resource', "account") # type: str - - cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - def prepare_request(next_link=None): - if not next_link: - - request = build_list_file_systems_request( - url=self._config.url, - resource=resource, - version=self._config.version, - prefix=prefix, - continuation=continuation, - max_results=max_results, - request_id_parameter=request_id_parameter, - timeout=timeout, - template_url=self.list_file_systems.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - else: - - request = build_list_file_systems_request( - url=self._config.url, - resource=resource, - version=self._config.version, - prefix=prefix, - continuation=continuation, - max_results=max_results, - request_id_parameter=request_id_parameter, - timeout=timeout, - template_url=next_link, - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - request.method = "GET" - return request - - async def extract_data(pipeline_response): - deserialized = self._deserialize("FileSystemList", pipeline_response) - list_of_elem = deserialized.filesystems - if cls: - list_of_elem = cls(list_of_elem) - return None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - - return AsyncItemPaged( - get_next, extract_data - ) - list_file_systems.metadata = {'url': "{url}"} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/__init__.py deleted file mode 100644 index 8c207a5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/__init__.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AclFailedEntry - from ._models_py3 import BlobHierarchyListSegment - from ._models_py3 import BlobItemInternal - from ._models_py3 import BlobPrefix - from ._models_py3 import BlobPropertiesInternal - from ._models_py3 import CpkInfo - from ._models_py3 import FileSystem - from ._models_py3 import FileSystemList - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListBlobsHierarchySegmentResponse - from ._models_py3 import ModifiedAccessConditions - from ._models_py3 import Path - from ._models_py3 import PathHTTPHeaders - from ._models_py3 import PathList - from ._models_py3 import SetAccessControlRecursiveResponse - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError - from ._models_py3 import StorageErrorError -except (SyntaxError, ImportError): - from ._models import AclFailedEntry # type: ignore - from ._models import BlobHierarchyListSegment # type: ignore - from ._models import BlobItemInternal # type: ignore - from ._models import BlobPrefix # type: ignore - from ._models import BlobPropertiesInternal # type: ignore - from ._models import CpkInfo # type: ignore - from ._models import FileSystem # type: ignore - from ._models import FileSystemList # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListBlobsHierarchySegmentResponse # type: ignore - from ._models import ModifiedAccessConditions # type: ignore - from ._models import Path # type: ignore - from ._models import PathHTTPHeaders # type: ignore - from ._models import PathList # type: ignore - from ._models import SetAccessControlRecursiveResponse # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageErrorError # type: ignore - -from ._azure_data_lake_storage_restapi_enums import ( - ListBlobsIncludeItem, - PathExpiryOptions, - PathGetPropertiesAction, - PathLeaseAction, - PathRenameMode, - PathResourceType, - PathSetAccessControlRecursiveMode, - PathUpdateAction, -) - -__all__ = [ - 'AclFailedEntry', - 'BlobHierarchyListSegment', - 'BlobItemInternal', - 'BlobPrefix', - 'BlobPropertiesInternal', - 'CpkInfo', - 'FileSystem', - 'FileSystemList', - 'LeaseAccessConditions', - 'ListBlobsHierarchySegmentResponse', - 'ModifiedAccessConditions', - 'Path', - 'PathHTTPHeaders', - 'PathList', - 'SetAccessControlRecursiveResponse', - 'SourceModifiedAccessConditions', - 'StorageError', - 'StorageErrorError', - 'ListBlobsIncludeItem', - 'PathExpiryOptions', - 'PathGetPropertiesAction', - 'PathLeaseAction', - 'PathRenameMode', - 'PathResourceType', - 'PathSetAccessControlRecursiveMode', - 'PathUpdateAction', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_azure_data_lake_storage_restapi_enums.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_azure_data_lake_storage_restapi_enums.py deleted file mode 100644 index 001cce1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_azure_data_lake_storage_restapi_enums.py +++ /dev/null @@ -1,66 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum -from six import with_metaclass -from azure.core import CaseInsensitiveEnumMeta - - -class ListBlobsIncludeItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - - COPY = "copy" - DELETED = "deleted" - METADATA = "metadata" - SNAPSHOTS = "snapshots" - UNCOMMITTEDBLOBS = "uncommittedblobs" - VERSIONS = "versions" - TAGS = "tags" - -class PathExpiryOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - - NEVER_EXPIRE = "NeverExpire" - RELATIVE_TO_CREATION = "RelativeToCreation" - RELATIVE_TO_NOW = "RelativeToNow" - ABSOLUTE = "Absolute" - -class PathGetPropertiesAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - - GET_ACCESS_CONTROL = "getAccessControl" - GET_STATUS = "getStatus" - -class PathLeaseAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - - ACQUIRE = "acquire" - BREAK_ENUM = "break" - CHANGE = "change" - RENEW = "renew" - RELEASE = "release" - -class PathRenameMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - - LEGACY = "legacy" - POSIX = "posix" - -class PathResourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - - DIRECTORY = "directory" - FILE = "file" - -class PathSetAccessControlRecursiveMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - - SET = "set" - MODIFY = "modify" - REMOVE = "remove" - -class PathUpdateAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - - APPEND = "append" - FLUSH = "flush" - SET_PROPERTIES = "setProperties" - SET_ACCESS_CONTROL = "setAccessControl" - SET_ACCESS_CONTROL_RECURSIVE = "setAccessControlRecursive" diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_models.py deleted file mode 100644 index 1e34fb3..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_models.py +++ /dev/null @@ -1,958 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AclFailedEntry(msrest.serialization.Model): - """AclFailedEntry. - - :ivar name: - :vartype name: str - :ivar type: - :vartype type: str - :ivar error_message: - :vartype error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword name: - :paramtype name: str - :keyword type: - :paramtype type: str - :keyword error_message: - :paramtype error_message: str - """ - super(AclFailedEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) - self.error_message = kwargs.get('error_message', None) - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. 
- - All required parameters must be populated in order to send to Azure. - - :ivar blob_prefixes: - :vartype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] - :ivar blob_items: Required. - :vartype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - **kwargs - ): - """ - :keyword blob_prefixes: - :paramtype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] - :keyword blob_items: Required. - :paramtype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] - """ - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = kwargs.get('blob_prefixes', None) - self.blob_items = kwargs['blob_items'] - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :ivar name: Required. - :vartype name: str - :ivar deleted: Required. - :vartype deleted: bool - :ivar snapshot: Required. - :vartype snapshot: str - :ivar version_id: - :vartype version_id: str - :ivar is_current_version: - :vartype is_current_version: bool - :ivar properties: Required. Properties of a blob. - :vartype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal - :ivar deletion_id: - :vartype deletion_id: str - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'deletion_id': {'key': 'DeletionId', 'type': 'str'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - **kwargs - ): - """ - :keyword name: Required. - :paramtype name: str - :keyword deleted: Required. - :paramtype deleted: bool - :keyword snapshot: Required. - :paramtype snapshot: str - :keyword version_id: - :paramtype version_id: str - :keyword is_current_version: - :paramtype is_current_version: bool - :keyword properties: Required. Properties of a blob. - :paramtype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal - :keyword deletion_id: - :paramtype deletion_id: str - """ - super(BlobItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.deleted = kwargs['deleted'] - self.snapshot = kwargs['snapshot'] - self.version_id = kwargs.get('version_id', None) - self.is_current_version = kwargs.get('is_current_version', None) - self.properties = kwargs['properties'] - self.deletion_id = kwargs.get('deletion_id', None) - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :ivar name: Required. - :vartype name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword name: Required. 
- :paramtype name: str - """ - super(BlobPrefix, self).__init__(**kwargs) - self.name = kwargs['name'] - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :ivar creation_time: - :vartype creation_time: ~datetime.datetime - :ivar last_modified: Required. - :vartype last_modified: ~datetime.datetime - :ivar etag: Required. - :vartype etag: str - :ivar content_length: Size in bytes. - :vartype content_length: long - :ivar content_type: - :vartype content_type: str - :ivar content_encoding: - :vartype content_encoding: str - :ivar content_language: - :vartype content_language: str - :ivar content_md5: - :vartype content_md5: bytearray - :ivar content_disposition: - :vartype content_disposition: str - :ivar cache_control: - :vartype cache_control: str - :ivar blob_sequence_number: - :vartype blob_sequence_number: long - :ivar copy_id: - :vartype copy_id: str - :ivar copy_source: - :vartype copy_source: str - :ivar copy_progress: - :vartype copy_progress: str - :ivar copy_completion_time: - :vartype copy_completion_time: ~datetime.datetime - :ivar copy_status_description: - :vartype copy_status_description: str - :ivar server_encrypted: - :vartype server_encrypted: bool - :ivar incremental_copy: - :vartype incremental_copy: bool - :ivar destination_snapshot: - :vartype destination_snapshot: str - :ivar deleted_time: - :vartype deleted_time: ~datetime.datetime - :ivar remaining_retention_days: - :vartype remaining_retention_days: int - :ivar access_tier_inferred: - :vartype access_tier_inferred: bool - :ivar customer_provided_key_sha256: - :vartype customer_provided_key_sha256: str - :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted. 
- :vartype encryption_scope: str - :ivar access_tier_change_time: - :vartype access_tier_change_time: ~datetime.datetime - :ivar tag_count: - :vartype tag_count: int - :ivar expires_on: - :vartype expires_on: ~datetime.datetime - :ivar is_sealed: - :vartype is_sealed: bool - :ivar last_accessed_on: - :vartype last_accessed_on: ~datetime.datetime - :ivar delete_time: - :vartype delete_time: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - **kwargs - ): - """ - :keyword creation_time: - :paramtype creation_time: ~datetime.datetime - :keyword last_modified: Required. - :paramtype last_modified: ~datetime.datetime - :keyword etag: Required. - :paramtype etag: str - :keyword content_length: Size in bytes. 
- :paramtype content_length: long - :keyword content_type: - :paramtype content_type: str - :keyword content_encoding: - :paramtype content_encoding: str - :keyword content_language: - :paramtype content_language: str - :keyword content_md5: - :paramtype content_md5: bytearray - :keyword content_disposition: - :paramtype content_disposition: str - :keyword cache_control: - :paramtype cache_control: str - :keyword blob_sequence_number: - :paramtype blob_sequence_number: long - :keyword copy_id: - :paramtype copy_id: str - :keyword copy_source: - :paramtype copy_source: str - :keyword copy_progress: - :paramtype copy_progress: str - :keyword copy_completion_time: - :paramtype copy_completion_time: ~datetime.datetime - :keyword copy_status_description: - :paramtype copy_status_description: str - :keyword server_encrypted: - :paramtype server_encrypted: bool - :keyword incremental_copy: - :paramtype incremental_copy: bool - :keyword destination_snapshot: - :paramtype destination_snapshot: str - :keyword deleted_time: - :paramtype deleted_time: ~datetime.datetime - :keyword remaining_retention_days: - :paramtype remaining_retention_days: int - :keyword access_tier_inferred: - :paramtype access_tier_inferred: bool - :keyword customer_provided_key_sha256: - :paramtype customer_provided_key_sha256: str - :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted. - :paramtype encryption_scope: str - :keyword access_tier_change_time: - :paramtype access_tier_change_time: ~datetime.datetime - :keyword tag_count: - :paramtype tag_count: int - :keyword expires_on: - :paramtype expires_on: ~datetime.datetime - :keyword is_sealed: - :paramtype is_sealed: bool - :keyword last_accessed_on: - :paramtype last_accessed_on: ~datetime.datetime - :keyword delete_time: - :paramtype delete_time: ~datetime.datetime - """ - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = kwargs.get('creation_time', None) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.content_length = kwargs.get('content_length', None) - self.content_type = kwargs.get('content_type', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_md5 = kwargs.get('content_md5', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.cache_control = kwargs.get('cache_control', None) - self.blob_sequence_number = kwargs.get('blob_sequence_number', None) - self.copy_id = kwargs.get('copy_id', None) - self.copy_source = kwargs.get('copy_source', None) - self.copy_progress = kwargs.get('copy_progress', None) - self.copy_completion_time = kwargs.get('copy_completion_time', None) - self.copy_status_description = kwargs.get('copy_status_description', None) - self.server_encrypted = kwargs.get('server_encrypted', None) - self.incremental_copy = kwargs.get('incremental_copy', None) - self.destination_snapshot = kwargs.get('destination_snapshot', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier_inferred = kwargs.get('access_tier_inferred', None) - self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.tag_count = kwargs.get('tag_count', None) - self.expires_on = 
kwargs.get('expires_on', None) - self.is_sealed = kwargs.get('is_sealed', None) - self.last_accessed_on = kwargs.get('last_accessed_on', None) - self.delete_time = kwargs.get('delete_time', None) - - -class CpkInfo(msrest.serialization.Model): - """Parameter group. - - :ivar encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :vartype encryption_key: str - :ivar encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided - if the x-ms-encryption-key header is provided. - :vartype encryption_key_sha256: str - :ivar encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. The only acceptable values to pass in are None and "AES256". The default value is - None. - :vartype encryption_algorithm: str - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :paramtype encryption_key: str - :keyword encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be - provided if the x-ms-encryption-key header is provided. - :paramtype encryption_key_sha256: str - :keyword encryption_algorithm: The algorithm used to produce the encryption key hash. - Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key - header is provided. The only acceptable values to pass in are None and "AES256". The default - value is None. - :paramtype encryption_algorithm: str - """ - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = kwargs.get('encryption_key', None) - self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) - self.encryption_algorithm = kwargs.get('encryption_algorithm', None) - - -class FileSystem(msrest.serialization.Model): - """FileSystem. - - :ivar name: - :vartype name: str - :ivar last_modified: - :vartype last_modified: str - :ivar e_tag: - :vartype e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword name: - :paramtype name: str - :keyword last_modified: - :paramtype last_modified: str - :keyword e_tag: - :paramtype e_tag: str - """ - super(FileSystem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - - -class FileSystemList(msrest.serialization.Model): - """FileSystemList. 
- - :ivar filesystems: - :vartype filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword filesystems: - :paramtype filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = kwargs.get('filesystems', None) - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :ivar lease_id: If specified, the operation only succeeds if the resource's lease is active and - matches this ID. - :vartype lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :paramtype lease_id: str - """ - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :ivar service_endpoint: Required. - :vartype service_endpoint: str - :ivar container_name: Required. - :vartype container_name: str - :ivar prefix: - :vartype prefix: str - :ivar marker: - :vartype marker: str - :ivar max_results: - :vartype max_results: int - :ivar delimiter: - :vartype delimiter: str - :ivar segment: Required. - :vartype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment - :ivar next_marker: - :vartype next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - """ - :keyword service_endpoint: Required. - :paramtype service_endpoint: str - :keyword container_name: Required. - :paramtype container_name: str - :keyword prefix: - :paramtype prefix: str - :keyword marker: - :paramtype marker: str - :keyword max_results: - :paramtype max_results: int - :keyword delimiter: - :paramtype delimiter: str - :keyword segment: Required. - :paramtype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment - :keyword next_marker: - :paramtype next_marker: str - """ - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.container_name = kwargs['container_name'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.delimiter = kwargs.get('delimiter', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs.get('next_marker', None) - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. 
- - :ivar if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :vartype if_modified_since: ~datetime.datetime - :ivar if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :vartype if_unmodified_since: ~datetime.datetime - :ivar if_match: Specify an ETag value to operate only on blobs with a matching value. - :vartype if_match: str - :ivar if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :vartype if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :paramtype if_unmodified_since: ~datetime.datetime - :keyword if_match: Specify an ETag value to operate only on blobs with a matching value. - :paramtype if_match: str - :keyword if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :paramtype if_none_match: str - """ - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - - -class Path(msrest.serialization.Model): - """Path. - - :ivar name: - :vartype name: str - :ivar is_directory: - :vartype is_directory: bool - :ivar last_modified: - :vartype last_modified: str - :ivar e_tag: - :vartype e_tag: str - :ivar content_length: - :vartype content_length: long - :ivar owner: - :vartype owner: str - :ivar group: - :vartype group: str - :ivar permissions: - :vartype permissions: str - :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted. 
- :vartype encryption_scope: str - :ivar creation_time: - :vartype creation_time: str - :ivar expiry_time: - :vartype expiry_time: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'creation_time': {'key': 'creationTime', 'type': 'str'}, - 'expiry_time': {'key': 'expiryTime', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword name: - :paramtype name: str - :keyword is_directory: - :paramtype is_directory: bool - :keyword last_modified: - :paramtype last_modified: str - :keyword e_tag: - :paramtype e_tag: str - :keyword content_length: - :paramtype content_length: long - :keyword owner: - :paramtype owner: str - :keyword group: - :paramtype group: str - :keyword permissions: - :paramtype permissions: str - :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted. - :paramtype encryption_scope: str - :keyword creation_time: - :paramtype creation_time: str - :keyword expiry_time: - :paramtype expiry_time: str - """ - super(Path, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.is_directory = kwargs.get('is_directory', False) - self.last_modified = kwargs.get('last_modified', None) - self.e_tag = kwargs.get('e_tag', None) - self.content_length = kwargs.get('content_length', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - self.encryption_scope = kwargs.get('encryption_scope', None) - self.creation_time = kwargs.get('creation_time', None) - self.expiry_time = kwargs.get('expiry_time', None) - - -class PathHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :ivar cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :vartype cache_control: str - :ivar content_encoding: Optional. Sets the blob's content encoding. If specified, this property - is stored with the blob and returned with a read request. - :vartype content_encoding: str - :ivar content_language: Optional. Set the blob's content language. If specified, this property - is stored with the blob and returned with a read request. - :vartype content_language: str - :ivar content_disposition: Optional. Sets the blob's Content-Disposition header. - :vartype content_disposition: str - :ivar content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :vartype content_type: str - :ivar content_md5: Specify the transactional md5 for the body, to be validated by the service. - :vartype content_md5: bytearray - :ivar transactional_content_hash: Specify the transactional md5 for the body, to be validated - by the service. 
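The parameter-group models shown above (``CpkInfo``, ``LeaseAccessConditions``, ``ModifiedAccessConditions``, ``PathHTTPHeaders``) are unpacked into individual request headers by operations such as ``flush_data``. A sketch of passing two of them, assuming the vendored module path being removed in this diff and an async ``path_ops`` client; the ETag and content type are illustrative:

.. code-block:: python

    from azure.multiapi.storagev2.filedatalake.v2021_06_08._generated import models as _models

    # Editor's illustration: gate a flush on an ETag match and set the content type.
    async def flush_if_unchanged(path_ops, file_length: int, etag: str):
        await path_ops.flush_data(
            position=file_length,
            content_length=0,
            path_http_headers=_models.PathHTTPHeaders(content_type="text/csv"),
            modified_access_conditions=_models.ModifiedAccessConditions(if_match=etag),
        )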
- :vartype transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :paramtype cache_control: str - :keyword content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :paramtype content_encoding: str - :keyword content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :paramtype content_language: str - :keyword content_disposition: Optional. Sets the blob's Content-Disposition header. - :paramtype content_disposition: str - :keyword content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :paramtype content_type: str - :keyword content_md5: Specify the transactional md5 for the body, to be validated by the - service. - :paramtype content_md5: bytearray - :keyword transactional_content_hash: Specify the transactional md5 for the body, to be - validated by the service. - :paramtype transactional_content_hash: bytearray - """ - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = kwargs.get('cache_control', None) - self.content_encoding = kwargs.get('content_encoding', None) - self.content_language = kwargs.get('content_language', None) - self.content_disposition = kwargs.get('content_disposition', None) - self.content_type = kwargs.get('content_type', None) - self.content_md5 = kwargs.get('content_md5', None) - self.transactional_content_hash = kwargs.get('transactional_content_hash', None) - - -class PathList(msrest.serialization.Model): - """PathList. - - :ivar paths: - :vartype paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword paths: - :paramtype paths: list[~azure.storage.filedatalake.models.Path] - """ - super(PathList, self).__init__(**kwargs) - self.paths = kwargs.get('paths', None) - - -class SetAccessControlRecursiveResponse(msrest.serialization.Model): - """SetAccessControlRecursiveResponse. 
- - :ivar directories_successful: - :vartype directories_successful: int - :ivar files_successful: - :vartype files_successful: int - :ivar failure_count: - :vartype failure_count: int - :ivar failed_entries: - :vartype failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword directories_successful: - :paramtype directories_successful: int - :keyword files_successful: - :paramtype files_successful: int - :keyword failure_count: - :paramtype failure_count: int - :keyword failed_entries: - :paramtype failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = kwargs.get('directories_successful', None) - self.files_successful = kwargs.get('files_successful', None) - self.failure_count = kwargs.get('failure_count', None) - self.failed_entries = kwargs.get('failed_entries', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :ivar source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :vartype source_if_match: str - :ivar source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :vartype source_if_none_match: str - :ivar source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :vartype source_if_modified_since: ~datetime.datetime - :ivar source_if_unmodified_since: Specify this header value to operate only on a blob if it has - not been modified since the specified date/time. - :vartype source_if_unmodified_since: ~datetime.datetime - """ - - _attribute_map = { - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :paramtype source_if_match: str - :keyword source_if_none_match: Specify an ETag value to operate only on blobs without a - matching value. - :paramtype source_if_none_match: str - :keyword source_if_modified_since: Specify this header value to operate only on a blob if it - has been modified since the specified date/time. - :paramtype source_if_modified_since: ~datetime.datetime - :keyword source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. 
- :paramtype source_if_unmodified_since: ~datetime.datetime - """ - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = kwargs.get('source_if_match', None) - self.source_if_none_match = kwargs.get('source_if_none_match', None) - self.source_if_modified_since = kwargs.get('source_if_modified_since', None) - self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :ivar error: The service error response object. - :vartype error: ~azure.storage.filedatalake.models.StorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorError'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword error: The service error response object. - :paramtype error: ~azure.storage.filedatalake.models.StorageErrorError - """ - super(StorageError, self).__init__(**kwargs) - self.error = kwargs.get('error', None) - - -class StorageErrorError(msrest.serialization.Model): - """The service error response object. - - :ivar code: The service error code. - :vartype code: str - :ivar message: The service error message. - :vartype message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword code: The service error code. - :paramtype code: str - :keyword message: The service error message. - :paramtype message: str - """ - super(StorageErrorError, self).__init__(**kwargs) - self.code = kwargs.get('code', None) - self.message = kwargs.get('message', None) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_models_py3.py deleted file mode 100644 index 924604a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/models/_models_py3.py +++ /dev/null @@ -1,1072 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import List, Optional - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AclFailedEntry(msrest.serialization.Model): - """AclFailedEntry. - - :ivar name: - :vartype name: str - :ivar type: - :vartype type: str - :ivar error_message: - :vartype error_message: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - type: Optional[str] = None, - error_message: Optional[str] = None, - **kwargs - ): - """ - :keyword name: - :paramtype name: str - :keyword type: - :paramtype type: str - :keyword error_message: - :paramtype error_message: str - """ - super(AclFailedEntry, self).__init__(**kwargs) - self.name = name - self.type = type - self.error_message = error_message - - -class BlobHierarchyListSegment(msrest.serialization.Model): - """BlobHierarchyListSegment. 
- - All required parameters must be populated in order to send to Azure. - - :ivar blob_prefixes: - :vartype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] - :ivar blob_items: Required. - :vartype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] - """ - - _validation = { - 'blob_items': {'required': True}, - } - - _attribute_map = { - 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, - } - _xml_map = { - 'name': 'Blobs' - } - - def __init__( - self, - *, - blob_items: List["BlobItemInternal"], - blob_prefixes: Optional[List["BlobPrefix"]] = None, - **kwargs - ): - """ - :keyword blob_prefixes: - :paramtype blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] - :keyword blob_items: Required. - :paramtype blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] - """ - super(BlobHierarchyListSegment, self).__init__(**kwargs) - self.blob_prefixes = blob_prefixes - self.blob_items = blob_items - - -class BlobItemInternal(msrest.serialization.Model): - """An Azure Storage blob. - - All required parameters must be populated in order to send to Azure. - - :ivar name: Required. - :vartype name: str - :ivar deleted: Required. - :vartype deleted: bool - :ivar snapshot: Required. - :vartype snapshot: str - :ivar version_id: - :vartype version_id: str - :ivar is_current_version: - :vartype is_current_version: bool - :ivar properties: Required. Properties of a blob. - :vartype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal - :ivar deletion_id: - :vartype deletion_id: str - """ - - _validation = { - 'name': {'required': True}, - 'deleted': {'required': True}, - 'snapshot': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'version_id': {'key': 'VersionId', 'type': 'str'}, - 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, - 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, - 'deletion_id': {'key': 'DeletionId', 'type': 'str'}, - } - _xml_map = { - 'name': 'Blob' - } - - def __init__( - self, - *, - name: str, - deleted: bool, - snapshot: str, - properties: "BlobPropertiesInternal", - version_id: Optional[str] = None, - is_current_version: Optional[bool] = None, - deletion_id: Optional[str] = None, - **kwargs - ): - """ - :keyword name: Required. - :paramtype name: str - :keyword deleted: Required. - :paramtype deleted: bool - :keyword snapshot: Required. - :paramtype snapshot: str - :keyword version_id: - :paramtype version_id: str - :keyword is_current_version: - :paramtype is_current_version: bool - :keyword properties: Required. Properties of a blob. - :paramtype properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal - :keyword deletion_id: - :paramtype deletion_id: str - """ - super(BlobItemInternal, self).__init__(**kwargs) - self.name = name - self.deleted = deleted - self.snapshot = snapshot - self.version_id = version_id - self.is_current_version = is_current_version - self.properties = properties - self.deletion_id = deletion_id - - -class BlobPrefix(msrest.serialization.Model): - """BlobPrefix. - - All required parameters must be populated in order to send to Azure. - - :ivar name: Required. 
- :vartype name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - """ - :keyword name: Required. - :paramtype name: str - """ - super(BlobPrefix, self).__init__(**kwargs) - self.name = name - - -class BlobPropertiesInternal(msrest.serialization.Model): - """Properties of a blob. - - All required parameters must be populated in order to send to Azure. - - :ivar creation_time: - :vartype creation_time: ~datetime.datetime - :ivar last_modified: Required. - :vartype last_modified: ~datetime.datetime - :ivar etag: Required. - :vartype etag: str - :ivar content_length: Size in bytes. - :vartype content_length: long - :ivar content_type: - :vartype content_type: str - :ivar content_encoding: - :vartype content_encoding: str - :ivar content_language: - :vartype content_language: str - :ivar content_md5: - :vartype content_md5: bytearray - :ivar content_disposition: - :vartype content_disposition: str - :ivar cache_control: - :vartype cache_control: str - :ivar blob_sequence_number: - :vartype blob_sequence_number: long - :ivar copy_id: - :vartype copy_id: str - :ivar copy_source: - :vartype copy_source: str - :ivar copy_progress: - :vartype copy_progress: str - :ivar copy_completion_time: - :vartype copy_completion_time: ~datetime.datetime - :ivar copy_status_description: - :vartype copy_status_description: str - :ivar server_encrypted: - :vartype server_encrypted: bool - :ivar incremental_copy: - :vartype incremental_copy: bool - :ivar destination_snapshot: - :vartype destination_snapshot: str - :ivar deleted_time: - :vartype deleted_time: ~datetime.datetime - :ivar remaining_retention_days: - :vartype remaining_retention_days: int - :ivar access_tier_inferred: - :vartype access_tier_inferred: bool - :ivar customer_provided_key_sha256: - :vartype customer_provided_key_sha256: str - :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted. 
- :vartype encryption_scope: str - :ivar access_tier_change_time: - :vartype access_tier_change_time: ~datetime.datetime - :ivar tag_count: - :vartype tag_count: int - :ivar expires_on: - :vartype expires_on: ~datetime.datetime - :ivar is_sealed: - :vartype is_sealed: bool - :ivar last_accessed_on: - :vartype last_accessed_on: ~datetime.datetime - :ivar delete_time: - :vartype delete_time: ~datetime.datetime - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - } - - _attribute_map = { - 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'content_type': {'key': 'Content-Type', 'type': 'str'}, - 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, - 'content_language': {'key': 'Content-Language', 'type': 'str'}, - 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, - 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, - 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, - 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, - 'copy_id': {'key': 'CopyId', 'type': 'str'}, - 'copy_source': {'key': 'CopySource', 'type': 'str'}, - 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, - 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, - 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, - 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, - 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, - 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, - 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'tag_count': {'key': 'TagCount', 'type': 'int'}, - 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, - 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, - 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, - 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Properties' - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - creation_time: Optional[datetime.datetime] = None, - content_length: Optional[int] = None, - content_type: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_md5: Optional[bytearray] = None, - content_disposition: Optional[str] = None, - cache_control: Optional[str] = None, - blob_sequence_number: Optional[int] = None, - copy_id: Optional[str] = None, - copy_source: Optional[str] = None, - copy_progress: Optional[str] = None, - copy_completion_time: Optional[datetime.datetime] = None, - copy_status_description: Optional[str] = None, - server_encrypted: Optional[bool] = None, - incremental_copy: Optional[bool] = None, - destination_snapshot: Optional[str] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier_inferred: Optional[bool] = None, - customer_provided_key_sha256: Optional[str] = 
None, - encryption_scope: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - tag_count: Optional[int] = None, - expires_on: Optional[datetime.datetime] = None, - is_sealed: Optional[bool] = None, - last_accessed_on: Optional[datetime.datetime] = None, - delete_time: Optional[datetime.datetime] = None, - **kwargs - ): - """ - :keyword creation_time: - :paramtype creation_time: ~datetime.datetime - :keyword last_modified: Required. - :paramtype last_modified: ~datetime.datetime - :keyword etag: Required. - :paramtype etag: str - :keyword content_length: Size in bytes. - :paramtype content_length: long - :keyword content_type: - :paramtype content_type: str - :keyword content_encoding: - :paramtype content_encoding: str - :keyword content_language: - :paramtype content_language: str - :keyword content_md5: - :paramtype content_md5: bytearray - :keyword content_disposition: - :paramtype content_disposition: str - :keyword cache_control: - :paramtype cache_control: str - :keyword blob_sequence_number: - :paramtype blob_sequence_number: long - :keyword copy_id: - :paramtype copy_id: str - :keyword copy_source: - :paramtype copy_source: str - :keyword copy_progress: - :paramtype copy_progress: str - :keyword copy_completion_time: - :paramtype copy_completion_time: ~datetime.datetime - :keyword copy_status_description: - :paramtype copy_status_description: str - :keyword server_encrypted: - :paramtype server_encrypted: bool - :keyword incremental_copy: - :paramtype incremental_copy: bool - :keyword destination_snapshot: - :paramtype destination_snapshot: str - :keyword deleted_time: - :paramtype deleted_time: ~datetime.datetime - :keyword remaining_retention_days: - :paramtype remaining_retention_days: int - :keyword access_tier_inferred: - :paramtype access_tier_inferred: bool - :keyword customer_provided_key_sha256: - :paramtype customer_provided_key_sha256: str - :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted. 
- :paramtype encryption_scope: str - :keyword access_tier_change_time: - :paramtype access_tier_change_time: ~datetime.datetime - :keyword tag_count: - :paramtype tag_count: int - :keyword expires_on: - :paramtype expires_on: ~datetime.datetime - :keyword is_sealed: - :paramtype is_sealed: bool - :keyword last_accessed_on: - :paramtype last_accessed_on: ~datetime.datetime - :keyword delete_time: - :paramtype delete_time: ~datetime.datetime - """ - super(BlobPropertiesInternal, self).__init__(**kwargs) - self.creation_time = creation_time - self.last_modified = last_modified - self.etag = etag - self.content_length = content_length - self.content_type = content_type - self.content_encoding = content_encoding - self.content_language = content_language - self.content_md5 = content_md5 - self.content_disposition = content_disposition - self.cache_control = cache_control - self.blob_sequence_number = blob_sequence_number - self.copy_id = copy_id - self.copy_source = copy_source - self.copy_progress = copy_progress - self.copy_completion_time = copy_completion_time - self.copy_status_description = copy_status_description - self.server_encrypted = server_encrypted - self.incremental_copy = incremental_copy - self.destination_snapshot = destination_snapshot - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier_inferred = access_tier_inferred - self.customer_provided_key_sha256 = customer_provided_key_sha256 - self.encryption_scope = encryption_scope - self.access_tier_change_time = access_tier_change_time - self.tag_count = tag_count - self.expires_on = expires_on - self.is_sealed = is_sealed - self.last_accessed_on = last_accessed_on - self.delete_time = delete_time - - -class CpkInfo(msrest.serialization.Model): - """Parameter group. - - :ivar encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :vartype encryption_key: str - :ivar encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided - if the x-ms-encryption-key header is provided. - :vartype encryption_key_sha256: str - :ivar encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, - the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is - provided. The only acceptable values to pass in are None and "AES256". The default value is - None. - :vartype encryption_algorithm: str - """ - - _attribute_map = { - 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, - 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, - 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, - } - - def __init__( - self, - *, - encryption_key: Optional[str] = None, - encryption_key_sha256: Optional[str] = None, - encryption_algorithm: Optional[str] = None, - **kwargs - ): - """ - :keyword encryption_key: Optional. Specifies the encryption key to use to encrypt the data - provided in the request. If not specified, encryption is performed with the root account - encryption key. For more information, see Encryption at Rest for Azure Storage Services. - :paramtype encryption_key: str - :keyword encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be - provided if the x-ms-encryption-key header is provided. 
- :paramtype encryption_key_sha256: str - :keyword encryption_algorithm: The algorithm used to produce the encryption key hash. - Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key - header is provided. The only acceptable values to pass in are None and "AES256". The default - value is None. - :paramtype encryption_algorithm: str - """ - super(CpkInfo, self).__init__(**kwargs) - self.encryption_key = encryption_key - self.encryption_key_sha256 = encryption_key_sha256 - self.encryption_algorithm = encryption_algorithm - - -class FileSystem(msrest.serialization.Model): - """FileSystem. - - :ivar name: - :vartype name: str - :ivar last_modified: - :vartype last_modified: str - :ivar e_tag: - :vartype e_tag: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - last_modified: Optional[str] = None, - e_tag: Optional[str] = None, - **kwargs - ): - """ - :keyword name: - :paramtype name: str - :keyword last_modified: - :paramtype last_modified: str - :keyword e_tag: - :paramtype e_tag: str - """ - super(FileSystem, self).__init__(**kwargs) - self.name = name - self.last_modified = last_modified - self.e_tag = e_tag - - -class FileSystemList(msrest.serialization.Model): - """FileSystemList. - - :ivar filesystems: - :vartype filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - - _attribute_map = { - 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, - } - - def __init__( - self, - *, - filesystems: Optional[List["FileSystem"]] = None, - **kwargs - ): - """ - :keyword filesystems: - :paramtype filesystems: list[~azure.storage.filedatalake.models.FileSystem] - """ - super(FileSystemList, self).__init__(**kwargs) - self.filesystems = filesystems - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :ivar lease_id: If specified, the operation only succeeds if the resource's lease is active and - matches this ID. - :vartype lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - """ - :keyword lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :paramtype lease_id: str - """ - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): - """An enumeration of blobs. - - All required parameters must be populated in order to send to Azure. - - :ivar service_endpoint: Required. - :vartype service_endpoint: str - :ivar container_name: Required. - :vartype container_name: str - :ivar prefix: - :vartype prefix: str - :ivar marker: - :vartype marker: str - :ivar max_results: - :vartype max_results: int - :ivar delimiter: - :vartype delimiter: str - :ivar segment: Required. 
- :vartype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment - :ivar next_marker: - :vartype next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'container_name': {'required': True}, - 'segment': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'delimiter': {'key': 'Delimiter', 'type': 'str'}, - 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - container_name: str, - segment: "BlobHierarchyListSegment", - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - delimiter: Optional[str] = None, - next_marker: Optional[str] = None, - **kwargs - ): - """ - :keyword service_endpoint: Required. - :paramtype service_endpoint: str - :keyword container_name: Required. - :paramtype container_name: str - :keyword prefix: - :paramtype prefix: str - :keyword marker: - :paramtype marker: str - :keyword max_results: - :paramtype max_results: int - :keyword delimiter: - :paramtype delimiter: str - :keyword segment: Required. - :paramtype segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment - :keyword next_marker: - :paramtype next_marker: str - """ - super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.container_name = container_name - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.delimiter = delimiter - self.segment = segment - self.next_marker = next_marker - - -class ModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :ivar if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :vartype if_modified_since: ~datetime.datetime - :ivar if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. - :vartype if_unmodified_since: ~datetime.datetime - :ivar if_match: Specify an ETag value to operate only on blobs with a matching value. - :vartype if_match: str - :ivar if_none_match: Specify an ETag value to operate only on blobs without a matching value. - :vartype if_none_match: str - """ - - _attribute_map = { - 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, - 'if_match': {'key': 'ifMatch', 'type': 'str'}, - 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, - } - - def __init__( - self, - *, - if_modified_since: Optional[datetime.datetime] = None, - if_unmodified_since: Optional[datetime.datetime] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs - ): - """ - :keyword if_modified_since: Specify this header value to operate only on a blob if it has been - modified since the specified date/time. - :paramtype if_modified_since: ~datetime.datetime - :keyword if_unmodified_since: Specify this header value to operate only on a blob if it has not - been modified since the specified date/time. 
- :paramtype if_unmodified_since: ~datetime.datetime - :keyword if_match: Specify an ETag value to operate only on blobs with a matching value. - :paramtype if_match: str - :keyword if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :paramtype if_none_match: str - """ - super(ModifiedAccessConditions, self).__init__(**kwargs) - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since - self.if_match = if_match - self.if_none_match = if_none_match - - -class Path(msrest.serialization.Model): - """Path. - - :ivar name: - :vartype name: str - :ivar is_directory: - :vartype is_directory: bool - :ivar last_modified: - :vartype last_modified: str - :ivar e_tag: - :vartype e_tag: str - :ivar content_length: - :vartype content_length: long - :ivar owner: - :vartype owner: str - :ivar group: - :vartype group: str - :ivar permissions: - :vartype permissions: str - :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted. - :vartype encryption_scope: str - :ivar creation_time: - :vartype creation_time: str - :ivar expiry_time: - :vartype expiry_time: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, - 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'e_tag': {'key': 'eTag', 'type': 'str'}, - 'content_length': {'key': 'contentLength', 'type': 'long'}, - 'owner': {'key': 'owner', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'str'}, - 'permissions': {'key': 'permissions', 'type': 'str'}, - 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, - 'creation_time': {'key': 'creationTime', 'type': 'str'}, - 'expiry_time': {'key': 'expiryTime', 'type': 'str'}, - } - - def __init__( - self, - *, - name: Optional[str] = None, - is_directory: Optional[bool] = False, - last_modified: Optional[str] = None, - e_tag: Optional[str] = None, - content_length: Optional[int] = None, - owner: Optional[str] = None, - group: Optional[str] = None, - permissions: Optional[str] = None, - encryption_scope: Optional[str] = None, - creation_time: Optional[str] = None, - expiry_time: Optional[str] = None, - **kwargs - ): - """ - :keyword name: - :paramtype name: str - :keyword is_directory: - :paramtype is_directory: bool - :keyword last_modified: - :paramtype last_modified: str - :keyword e_tag: - :paramtype e_tag: str - :keyword content_length: - :paramtype content_length: long - :keyword owner: - :paramtype owner: str - :keyword group: - :paramtype group: str - :keyword permissions: - :paramtype permissions: str - :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted. - :paramtype encryption_scope: str - :keyword creation_time: - :paramtype creation_time: str - :keyword expiry_time: - :paramtype expiry_time: str - """ - super(Path, self).__init__(**kwargs) - self.name = name - self.is_directory = is_directory - self.last_modified = last_modified - self.e_tag = e_tag - self.content_length = content_length - self.owner = owner - self.group = group - self.permissions = permissions - self.encryption_scope = encryption_scope - self.creation_time = creation_time - self.expiry_time = expiry_time - - -class PathHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :ivar cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. 
- :vartype cache_control: str - :ivar content_encoding: Optional. Sets the blob's content encoding. If specified, this property - is stored with the blob and returned with a read request. - :vartype content_encoding: str - :ivar content_language: Optional. Set the blob's content language. If specified, this property - is stored with the blob and returned with a read request. - :vartype content_language: str - :ivar content_disposition: Optional. Sets the blob's Content-Disposition header. - :vartype content_disposition: str - :ivar content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :vartype content_type: str - :ivar content_md5: Specify the transactional md5 for the body, to be validated by the service. - :vartype content_md5: bytearray - :ivar transactional_content_hash: Specify the transactional md5 for the body, to be validated - by the service. - :vartype transactional_content_hash: bytearray - """ - - _attribute_map = { - 'cache_control': {'key': 'cacheControl', 'type': 'str'}, - 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, - 'content_language': {'key': 'contentLanguage', 'type': 'str'}, - 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, - 'content_type': {'key': 'contentType', 'type': 'str'}, - 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, - 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, - } - - def __init__( - self, - *, - cache_control: Optional[str] = None, - content_encoding: Optional[str] = None, - content_language: Optional[str] = None, - content_disposition: Optional[str] = None, - content_type: Optional[str] = None, - content_md5: Optional[bytearray] = None, - transactional_content_hash: Optional[bytearray] = None, - **kwargs - ): - """ - :keyword cache_control: Optional. Sets the blob's cache control. If specified, this property is - stored with the blob and returned with a read request. - :paramtype cache_control: str - :keyword content_encoding: Optional. Sets the blob's content encoding. If specified, this - property is stored with the blob and returned with a read request. - :paramtype content_encoding: str - :keyword content_language: Optional. Set the blob's content language. If specified, this - property is stored with the blob and returned with a read request. - :paramtype content_language: str - :keyword content_disposition: Optional. Sets the blob's Content-Disposition header. - :paramtype content_disposition: str - :keyword content_type: Optional. Sets the blob's content type. If specified, this property is - stored with the blob and returned with a read request. - :paramtype content_type: str - :keyword content_md5: Specify the transactional md5 for the body, to be validated by the - service. - :paramtype content_md5: bytearray - :keyword transactional_content_hash: Specify the transactional md5 for the body, to be - validated by the service. - :paramtype transactional_content_hash: bytearray - """ - super(PathHTTPHeaders, self).__init__(**kwargs) - self.cache_control = cache_control - self.content_encoding = content_encoding - self.content_language = content_language - self.content_disposition = content_disposition - self.content_type = content_type - self.content_md5 = content_md5 - self.transactional_content_hash = transactional_content_hash - - -class PathList(msrest.serialization.Model): - """PathList. 
- - :ivar paths: - :vartype paths: list[~azure.storage.filedatalake.models.Path] - """ - - _attribute_map = { - 'paths': {'key': 'paths', 'type': '[Path]'}, - } - - def __init__( - self, - *, - paths: Optional[List["Path"]] = None, - **kwargs - ): - """ - :keyword paths: - :paramtype paths: list[~azure.storage.filedatalake.models.Path] - """ - super(PathList, self).__init__(**kwargs) - self.paths = paths - - -class SetAccessControlRecursiveResponse(msrest.serialization.Model): - """SetAccessControlRecursiveResponse. - - :ivar directories_successful: - :vartype directories_successful: int - :ivar files_successful: - :vartype files_successful: int - :ivar failure_count: - :vartype failure_count: int - :ivar failed_entries: - :vartype failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - - _attribute_map = { - 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, - 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, - 'failure_count': {'key': 'failureCount', 'type': 'int'}, - 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, - } - - def __init__( - self, - *, - directories_successful: Optional[int] = None, - files_successful: Optional[int] = None, - failure_count: Optional[int] = None, - failed_entries: Optional[List["AclFailedEntry"]] = None, - **kwargs - ): - """ - :keyword directories_successful: - :paramtype directories_successful: int - :keyword files_successful: - :paramtype files_successful: int - :keyword failure_count: - :paramtype failure_count: int - :keyword failed_entries: - :paramtype failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] - """ - super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - self.failed_entries = failed_entries - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :ivar source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :vartype source_if_match: str - :ivar source_if_none_match: Specify an ETag value to operate only on blobs without a matching - value. - :vartype source_if_none_match: str - :ivar source_if_modified_since: Specify this header value to operate only on a blob if it has - been modified since the specified date/time. - :vartype source_if_modified_since: ~datetime.datetime - :ivar source_if_unmodified_since: Specify this header value to operate only on a blob if it has - not been modified since the specified date/time. - :vartype source_if_unmodified_since: ~datetime.datetime - """ - - _attribute_map = { - 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, - 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, - 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, - 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, - } - - def __init__( - self, - *, - source_if_match: Optional[str] = None, - source_if_none_match: Optional[str] = None, - source_if_modified_since: Optional[datetime.datetime] = None, - source_if_unmodified_since: Optional[datetime.datetime] = None, - **kwargs - ): - """ - :keyword source_if_match: Specify an ETag value to operate only on blobs with a matching value. - :paramtype source_if_match: str - :keyword source_if_none_match: Specify an ETag value to operate only on blobs without a - matching value. 
- :paramtype source_if_none_match: str - :keyword source_if_modified_since: Specify this header value to operate only on a blob if it - has been modified since the specified date/time. - :paramtype source_if_modified_since: ~datetime.datetime - :keyword source_if_unmodified_since: Specify this header value to operate only on a blob if it - has not been modified since the specified date/time. - :paramtype source_if_unmodified_since: ~datetime.datetime - """ - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match = source_if_match - self.source_if_none_match = source_if_none_match - self.source_if_modified_since = source_if_modified_since - self.source_if_unmodified_since = source_if_unmodified_since - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :ivar error: The service error response object. - :vartype error: ~azure.storage.filedatalake.models.StorageErrorError - """ - - _attribute_map = { - 'error': {'key': 'error', 'type': 'StorageErrorError'}, - } - - def __init__( - self, - *, - error: Optional["StorageErrorError"] = None, - **kwargs - ): - """ - :keyword error: The service error response object. - :paramtype error: ~azure.storage.filedatalake.models.StorageErrorError - """ - super(StorageError, self).__init__(**kwargs) - self.error = error - - -class StorageErrorError(msrest.serialization.Model): - """The service error response object. - - :ivar code: The service error code. - :vartype code: str - :ivar message: The service error message. - :vartype message: str - """ - - _attribute_map = { - 'code': {'key': 'Code', 'type': 'str'}, - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - code: Optional[str] = None, - message: Optional[str] = None, - **kwargs - ): - """ - :keyword code: The service error code. - :paramtype code: str - :keyword message: The service error message. - :paramtype message: str - """ - super(StorageErrorError, self).__init__(**kwargs) - self.code = code - self.message = message diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/__init__.py deleted file mode 100644 index 0db71e0..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._file_system_operations import FileSystemOperations -from ._path_operations import PathOperations - -__all__ = [ - 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', -] diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_file_system_operations.py deleted file mode 100644 index 5bec942..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_file_system_operations.py +++ /dev/null @@ -1,907 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING - -from msrest import Serializer - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest -from azure.core.tracing.decorator import distributed_trace - -from .. import models as _models -from .._vendor import _convert_request, _format_url_section - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, List, Optional, TypeVar, Union - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False -# fmt: off - -def build_create_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - resource = kwargs.pop('resource', "filesystem") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - properties = kwargs.pop('properties', None) # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['resource'] = _SERIALIZER.query("resource", resource, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if properties is not None: - _header_parameters['x-ms-properties'] = _SERIALIZER.header("properties", properties, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PUT", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_set_properties_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) -> HttpRequest - resource = kwargs.pop('resource', "filesystem") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - properties = kwargs.pop('properties', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['resource'] = _SERIALIZER.query("resource", resource, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if properties is not None: - _header_parameters['x-ms-properties'] = _SERIALIZER.header("properties", properties, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 
'rfc-1123') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PATCH", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_get_properties_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) -> HttpRequest - resource = kwargs.pop('resource', "filesystem") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['resource'] = _SERIALIZER.query("resource", resource, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="HEAD", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_delete_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - resource = kwargs.pop('resource', "filesystem") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['resource'] = _SERIALIZER.query("resource", resource, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="DELETE", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_list_paths_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - resource = kwargs.pop('resource', "filesystem") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - recursive = kwargs.pop('recursive') # type: bool - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - continuation = kwargs.pop('continuation', None) # type: Optional[str] - path = kwargs.pop('path', None) # type: Optional[str] - max_results = kwargs.pop('max_results', None) # type: Optional[int] - upn = kwargs.pop('upn', None) # type: Optional[bool] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['resource'] = _SERIALIZER.query("resource", resource, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - _query_parameters['continuation'] = _SERIALIZER.query("continuation", continuation, 'str') - if path is not None: - _query_parameters['directory'] = _SERIALIZER.query("path", path, 'str') - _query_parameters['recursive'] = _SERIALIZER.query("recursive", recursive, 'bool') - if max_results is not None: - _query_parameters['maxResults'] = _SERIALIZER.query("max_results", max_results, 'int', minimum=1) - if upn is not None: - _query_parameters['upn'] = _SERIALIZER.query("upn", upn, 'bool') - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="GET", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_list_blob_hierarchy_segment_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - restype = kwargs.pop('restype', "container") # type: str - comp = kwargs.pop('comp', "list") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - prefix = kwargs.pop('prefix', None) # type: Optional[str] - delimiter = kwargs.pop('delimiter', None) # type: Optional[str] - marker = kwargs.pop('marker', None) # type: Optional[str] - max_results = kwargs.pop('max_results', None) # type: Optional[int] - include = kwargs.pop('include', None) # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - showonly = kwargs.pop('showonly', "deleted") # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - - accept = "application/xml" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['restype'] = _SERIALIZER.query("restype", restype, 'str') - _query_parameters['comp'] = _SERIALIZER.query("comp", comp, 'str') - if prefix is not None: - _query_parameters['prefix'] = _SERIALIZER.query("prefix", prefix, 'str') - if delimiter is not None: - _query_parameters['delimiter'] = _SERIALIZER.query("delimiter", delimiter, 'str') - if marker is not None: - _query_parameters['marker'] = _SERIALIZER.query("marker", marker, 'str') - if max_results is not None: - _query_parameters['maxResults'] = _SERIALIZER.query("max_results", max_results, 'int', minimum=1) - if include is not None: - _query_parameters['include'] = _SERIALIZER.query("include", include, '[str]', div=',') - if showonly is not None: - _query_parameters['showonly'] = _SERIALIZER.query("showonly", showonly, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="GET", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - -# fmt: on -class FileSystemOperations(object): - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s - :attr:`file_system` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - args = list(args) - self._client = args.pop(0) if args else kwargs.pop("client") - self._config = args.pop(0) if args else kwargs.pop("config") - self._serialize = args.pop(0) if args else kwargs.pop("serializer") - self._deserialize = args.pop(0) if args else kwargs.pop("deserializer") - - - @distributed_trace - def create( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - properties=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Create FileSystem. 
- - Create a FileSystem rooted at the specified location. If the FileSystem already exists, the - operation fails. This operation does not support conditional HTTP requests. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. Default value is - None. - :type properties: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - - request = build_create_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - properties=properties, - template_url=self.create.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace - def set_properties( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - properties=None, # type: Optional[str] - modified_access_conditions=None, # 
type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set FileSystem Properties. - - Set properties for the FileSystem. This operation supports conditional HTTP requests. For - more information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. Default value is - None. - :type properties: str - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_set_properties_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - properties=properties, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.set_properties.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace - def get_properties( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Get FileSystem Properties. - - All system and user-defined filesystem properties are specified in the response headers. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - - request = build_get_properties_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - template_url=self.get_properties.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace - def delete( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Delete FileSystem. 
- - Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same - identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, - attempts to create a filesystem with the same identifier will fail with status code 409 - (Conflict), with the service returning additional error information indicating that the - filesystem is being deleted. All other operations, including operations on any files or - directories within the filesystem, will fail with status code 404 (Not Found) while the - filesystem is being deleted. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _if_modified_since = None - _if_unmodified_since = None - if modified_access_conditions is not None: - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_delete_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.delete.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace - def list_paths( - self, - recursive, # type: bool - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - continuation=None, # type: Optional[str] - path=None, # type: 
Optional[str] - max_results=None, # type: Optional[int] - upn=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> "_models.PathList" - """List Paths. - - List FileSystem paths and their properties. - - :param recursive: Required. - :type recursive: bool - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param path: Optional. Filters results to paths within the specified directory. An error - occurs if the directory does not exist. Default value is None. - :type path: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. Default value is - None. - :type max_results: int - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. 
- :type upn: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: PathList, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.PathList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - - request = build_list_paths_request( - url=self._config.url, - resource=self._config.resource, - version=self._config.version, - recursive=recursive, - request_id_parameter=request_id_parameter, - timeout=timeout, - continuation=continuation, - path=path, - max_results=max_results, - upn=upn, - template_url=self.list_paths.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - - deserialized = self._deserialize('PathList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - list_paths.metadata = {'url': "{url}/{filesystem}"} # type: ignore - - - @distributed_trace - def list_blob_hierarchy_segment( - self, - prefix=None, # type: Optional[str] - delimiter=None, # type: Optional[str] - marker=None, # type: Optional[str] - max_results=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] - showonly="deleted", # type: Optional[str] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListBlobsHierarchySegmentResponse" - """The List Blobs operation returns a list of the blobs under the specified container. - - :param prefix: Filters results to filesystems within the specified prefix. Default value is - None. - :type prefix: str - :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix - element in the response body that acts as a placeholder for all blobs whose names begin with - the same substring up to the appearance of the delimiter character. The delimiter may be a - single character or a string. Default value is None. - :type delimiter: str - :param marker: A string value that identifies the portion of the list of containers to be - returned with the next listing operation. 
The operation returns the NextMarker value within the - response body if the listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value for the marker parameter - in a subsequent call to request the next page of list items. The marker value is opaque to the - client. Default value is None. - :type marker: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. Default value is - None. - :type max_results: int - :param include: Include this parameter to specify one or more datasets to include in the - response. Default value is None. - :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem] - :param showonly: Include this parameter to specify one or more datasets to include in the - response. Possible values are "deleted" or None. Default value is "deleted". - :type showonly: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :keyword restype: restype. Default value is "container". Note that overriding this default - value may result in unsupported behavior. - :paramtype restype: str - :keyword comp: comp. Default value is "list". Note that overriding this default value may - result in unsupported behavior. - :paramtype comp: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - restype = kwargs.pop('restype', "container") # type: str - comp = kwargs.pop('comp', "list") # type: str - - - request = build_list_blob_hierarchy_segment_request( - url=self._config.url, - restype=restype, - comp=comp, - version=self._config.version, - prefix=prefix, - delimiter=delimiter, - marker=marker, - max_results=max_results, - include=include, - showonly=showonly, - timeout=timeout, - request_id_parameter=request_id_parameter, - template_url=self.list_blob_hierarchy_segment.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - list_blob_hierarchy_segment.metadata = {'url': "{url}/{filesystem}"} # type: ignore - diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_path_operations.py deleted file mode 100644 index 33ba92a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_path_operations.py +++ /dev/null @@ -1,2631 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import datetime -from typing import TYPE_CHECKING - -from msrest import Serializer - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest -from azure.core.tracing.decorator import distributed_trace - -from .. import models as _models -from .._vendor import _convert_request, _format_url_section - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False -# fmt: off - -def build_create_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
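For orientation only, and not part of the removed generated code: the FileSystemOperations methods deleted above (create, get/set properties, delete, list_paths) are normally reached through the public azure-storage-file-datalake client rather than invoked directly. A minimal sketch, assuming that public client and placeholder account values::

    from azure.storage.filedatalake import DataLakeServiceClient

    # Placeholder endpoint and credential -- substitute real values.
    service = DataLakeServiceClient(
        account_url="https://<account>.dfs.core.windows.net",
        credential="<account-key-or-token-credential>",
    )
    filesystem = service.get_file_system_client("my-filesystem")

    filesystem.create_file_system()                  # roughly FileSystemOperations.create
    props = filesystem.get_file_system_properties()  # roughly FileSystemOperations.get_properties
    for path in filesystem.get_paths(path="dir", recursive=True):
        print(path.name)                             # paging over list_paths is handled by the client
    filesystem.delete_file_system()                  # roughly FileSystemOperations.delete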
-> HttpRequest - version = kwargs.pop('version', "2021-06-08") # type: str - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - resource = kwargs.pop('resource', None) # type: Optional[Union[str, "_models.PathResourceType"]] - continuation = kwargs.pop('continuation', None) # type: Optional[str] - mode = kwargs.pop('mode', None) # type: Optional[Union[str, "_models.PathRenameMode"]] - cache_control = kwargs.pop('cache_control', None) # type: Optional[str] - content_encoding = kwargs.pop('content_encoding', None) # type: Optional[str] - content_language = kwargs.pop('content_language', None) # type: Optional[str] - content_disposition = kwargs.pop('content_disposition', None) # type: Optional[str] - content_type_parameter = kwargs.pop('content_type_parameter', None) # type: Optional[str] - rename_source = kwargs.pop('rename_source', None) # type: Optional[str] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - source_lease_id = kwargs.pop('source_lease_id', None) # type: Optional[str] - properties = kwargs.pop('properties', None) # type: Optional[str] - permissions = kwargs.pop('permissions', None) # type: Optional[str] - umask = kwargs.pop('umask', None) # type: Optional[str] - if_match = kwargs.pop('if_match', None) # type: Optional[str] - if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - source_if_match = kwargs.pop('source_if_match', None) # type: Optional[str] - source_if_none_match = kwargs.pop('source_if_none_match', None) # type: Optional[str] - source_if_modified_since = kwargs.pop('source_if_modified_since', None) # type: Optional[datetime.datetime] - source_if_unmodified_since = kwargs.pop('source_if_unmodified_since', None) # type: Optional[datetime.datetime] - encryption_key = kwargs.pop('encryption_key', None) # type: Optional[str] - encryption_key_sha256 = kwargs.pop('encryption_key_sha256', None) # type: Optional[str] - encryption_algorithm = kwargs.pop('encryption_algorithm', "AES256") # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - if resource is not None: - _query_parameters['resource'] = _SERIALIZER.query("resource", resource, 'str') - if continuation is not None: - _query_parameters['continuation'] = _SERIALIZER.query("continuation", continuation, 'str') - if mode is not None: - _query_parameters['mode'] = _SERIALIZER.query("mode", mode, 'str') - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if cache_control is not None: - _header_parameters['x-ms-cache-control'] = _SERIALIZER.header("cache_control", 
cache_control, 'str') - if content_encoding is not None: - _header_parameters['x-ms-content-encoding'] = _SERIALIZER.header("content_encoding", content_encoding, 'str') - if content_language is not None: - _header_parameters['x-ms-content-language'] = _SERIALIZER.header("content_language", content_language, 'str') - if content_disposition is not None: - _header_parameters['x-ms-content-disposition'] = _SERIALIZER.header("content_disposition", content_disposition, 'str') - if content_type_parameter is not None: - _header_parameters['x-ms-content-type'] = _SERIALIZER.header("content_type_parameter", content_type_parameter, 'str') - if rename_source is not None: - _header_parameters['x-ms-rename-source'] = _SERIALIZER.header("rename_source", rename_source, 'str') - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if source_lease_id is not None: - _header_parameters['x-ms-source-lease-id'] = _SERIALIZER.header("source_lease_id", source_lease_id, 'str') - if properties is not None: - _header_parameters['x-ms-properties'] = _SERIALIZER.header("properties", properties, 'str') - if permissions is not None: - _header_parameters['x-ms-permissions'] = _SERIALIZER.header("permissions", permissions, 'str') - if umask is not None: - _header_parameters['x-ms-umask'] = _SERIALIZER.header("umask", umask, 'str') - if if_match is not None: - _header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') - if if_none_match is not None: - _header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if source_if_match is not None: - _header_parameters['x-ms-source-if-match'] = _SERIALIZER.header("source_if_match", source_if_match, 'str') - if source_if_none_match is not None: - _header_parameters['x-ms-source-if-none-match'] = _SERIALIZER.header("source_if_none_match", source_if_none_match, 'str') - if source_if_modified_since is not None: - _header_parameters['x-ms-source-if-modified-since'] = _SERIALIZER.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') - if source_if_unmodified_since is not None: - _header_parameters['x-ms-source-if-unmodified-since'] = _SERIALIZER.header("source_if_unmodified_since", source_if_unmodified_since, 'rfc-1123') - if encryption_key is not None: - _header_parameters['x-ms-encryption-key'] = _SERIALIZER.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - _header_parameters['x-ms-encryption-key-sha256'] = _SERIALIZER.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - _header_parameters['x-ms-encryption-algorithm'] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PUT", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_update_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - version = kwargs.pop('version', "2021-06-08") # type: str - content_type = kwargs.pop('content_type', None) # type: Optional[str] - action = kwargs.pop('action') # type: Union[str, "_models.PathUpdateAction"] - mode = kwargs.pop('mode') # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - max_records = kwargs.pop('max_records', None) # type: Optional[int] - continuation = kwargs.pop('continuation', None) # type: Optional[str] - force_flag = kwargs.pop('force_flag', None) # type: Optional[bool] - position = kwargs.pop('position', None) # type: Optional[int] - retain_uncommitted_data = kwargs.pop('retain_uncommitted_data', None) # type: Optional[bool] - close = kwargs.pop('close', None) # type: Optional[bool] - content_length = kwargs.pop('content_length', None) # type: Optional[int] - content_md5 = kwargs.pop('content_md5', None) # type: Optional[bytearray] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - cache_control = kwargs.pop('cache_control', None) # type: Optional[str] - content_type_parameter = kwargs.pop('content_type_parameter', None) # type: Optional[str] - content_disposition = kwargs.pop('content_disposition', None) # type: Optional[str] - content_encoding = kwargs.pop('content_encoding', None) # type: Optional[str] - content_language = kwargs.pop('content_language', None) # type: Optional[str] - properties = kwargs.pop('properties', None) # type: Optional[str] - owner = kwargs.pop('owner', None) # type: Optional[str] - group = kwargs.pop('group', None) # type: Optional[str] - permissions = kwargs.pop('permissions', None) # type: Optional[str] - acl = kwargs.pop('acl', None) # type: Optional[str] - if_match = kwargs.pop('if_match', None) # type: Optional[str] - if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - _query_parameters['action'] = _SERIALIZER.query("action", action, 'str') - if max_records is not None: - _query_parameters['maxRecords'] = _SERIALIZER.query("max_records", max_records, 'int', minimum=1) - if continuation is not None: - _query_parameters['continuation'] = _SERIALIZER.query("continuation", continuation, 'str') - _query_parameters['mode'] = _SERIALIZER.query("mode", mode, 'str') - if force_flag is not None: - _query_parameters['forceFlag'] = _SERIALIZER.query("force_flag", force_flag, 'bool') - if position is not None: - _query_parameters['position'] = _SERIALIZER.query("position", position, 'long') - if retain_uncommitted_data is not None: - _query_parameters['retainUncommittedData'] = _SERIALIZER.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - _query_parameters['close'] = _SERIALIZER.query("close", close, 'bool') - - # Construct 
headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if content_length is not None: - _header_parameters['Content-Length'] = _SERIALIZER.header("content_length", content_length, 'long', minimum=0) - if content_md5 is not None: - _header_parameters['x-ms-content-md5'] = _SERIALIZER.header("content_md5", content_md5, 'bytearray') - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if cache_control is not None: - _header_parameters['x-ms-cache-control'] = _SERIALIZER.header("cache_control", cache_control, 'str') - if content_type_parameter is not None: - _header_parameters['x-ms-content-type'] = _SERIALIZER.header("content_type_parameter", content_type_parameter, 'str') - if content_disposition is not None: - _header_parameters['x-ms-content-disposition'] = _SERIALIZER.header("content_disposition", content_disposition, 'str') - if content_encoding is not None: - _header_parameters['x-ms-content-encoding'] = _SERIALIZER.header("content_encoding", content_encoding, 'str') - if content_language is not None: - _header_parameters['x-ms-content-language'] = _SERIALIZER.header("content_language", content_language, 'str') - if properties is not None: - _header_parameters['x-ms-properties'] = _SERIALIZER.header("properties", properties, 'str') - if owner is not None: - _header_parameters['x-ms-owner'] = _SERIALIZER.header("owner", owner, 'str') - if group is not None: - _header_parameters['x-ms-group'] = _SERIALIZER.header("group", group, 'str') - if permissions is not None: - _header_parameters['x-ms-permissions'] = _SERIALIZER.header("permissions", permissions, 'str') - if acl is not None: - _header_parameters['x-ms-acl'] = _SERIALIZER.header("acl", acl, 'str') - if if_match is not None: - _header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') - if if_none_match is not None: - _header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if content_type is not None: - _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PATCH", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_lease_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - version = kwargs.pop('version', "2021-06-08") # type: str - x_ms_lease_action = kwargs.pop('x_ms_lease_action') # type: Union[str, "_models.PathLeaseAction"] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - x_ms_lease_duration = kwargs.pop('x_ms_lease_duration', None) # type: Optional[int] - x_ms_lease_break_period = kwargs.pop('x_ms_lease_break_period', None) # type: Optional[int] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - proposed_lease_id = kwargs.pop('proposed_lease_id', None) # type: Optional[str] - if_match = kwargs.pop('if_match', None) # type: Optional[str] - if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - _header_parameters['x-ms-lease-action'] = _SERIALIZER.header("x_ms_lease_action", x_ms_lease_action, 'str') - if x_ms_lease_duration is not None: - _header_parameters['x-ms-lease-duration'] = _SERIALIZER.header("x_ms_lease_duration", x_ms_lease_duration, 'int') - if x_ms_lease_break_period is not None: - _header_parameters['x-ms-lease-break-period'] = _SERIALIZER.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - _header_parameters['x-ms-proposed-lease-id'] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, 'str') - if if_match is not None: - _header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') - if if_none_match is not None: - _header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="POST", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_read_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - version = kwargs.pop('version', "2021-06-08") # type: str - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - range = kwargs.pop('range', None) # type: Optional[str] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - x_ms_range_get_content_md5 = kwargs.pop('x_ms_range_get_content_md5', None) # type: Optional[bool] - if_match = kwargs.pop('if_match', None) # type: Optional[str] - if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - encryption_key = kwargs.pop('encryption_key', None) # type: Optional[str] - encryption_key_sha256 = kwargs.pop('encryption_key_sha256', None) # type: Optional[str] - encryption_algorithm = kwargs.pop('encryption_algorithm', "AES256") # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if range is not None: - _header_parameters['Range'] = _SERIALIZER.header("range", range, 'str') - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if x_ms_range_get_content_md5 is not None: - _header_parameters['x-ms-range-get-content-md5'] = _SERIALIZER.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') - if if_match is not None: - _header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') - if if_none_match is not None: - _header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if encryption_key is not None: - _header_parameters['x-ms-encryption-key'] = _SERIALIZER.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - _header_parameters['x-ms-encryption-key-sha256'] = _SERIALIZER.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - _header_parameters['x-ms-encryption-algorithm'] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="GET", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_get_properties_request( - url, # type: str - **kwargs # 
type: Any -): - # type: (...) -> HttpRequest - version = kwargs.pop('version', "2021-06-08") # type: str - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - action = kwargs.pop('action', None) # type: Optional[Union[str, "_models.PathGetPropertiesAction"]] - upn = kwargs.pop('upn', None) # type: Optional[bool] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - if_match = kwargs.pop('if_match', None) # type: Optional[str] - if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - if action is not None: - _query_parameters['action'] = _SERIALIZER.query("action", action, 'str') - if upn is not None: - _query_parameters['upn'] = _SERIALIZER.query("upn", upn, 'bool') - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if if_match is not None: - _header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') - if if_none_match is not None: - _header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="HEAD", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_delete_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - version = kwargs.pop('version', "2021-06-08") # type: str - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - recursive = kwargs.pop('recursive', None) # type: Optional[bool] - continuation = kwargs.pop('continuation', None) # type: Optional[str] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - if_match = kwargs.pop('if_match', None) # type: Optional[str] - if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - if recursive is not None: - _query_parameters['recursive'] = _SERIALIZER.query("recursive", recursive, 'bool') - if continuation is not None: - _query_parameters['continuation'] = _SERIALIZER.query("continuation", continuation, 'str') - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if if_match is not None: - _header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') - if if_none_match is not None: - _header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="DELETE", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_set_access_control_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - action = kwargs.pop('action', "setAccessControl") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - timeout = kwargs.pop('timeout', None) # type: Optional[int] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - owner = kwargs.pop('owner', None) # type: Optional[str] - group = kwargs.pop('group', None) # type: Optional[str] - permissions = kwargs.pop('permissions', None) # type: Optional[str] - acl = kwargs.pop('acl', None) # type: Optional[str] - if_match = kwargs.pop('if_match', None) # type: Optional[str] - if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['action'] = _SERIALIZER.query("action", action, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if owner is not None: - _header_parameters['x-ms-owner'] = _SERIALIZER.header("owner", owner, 'str') - if group is not None: - _header_parameters['x-ms-group'] = _SERIALIZER.header("group", group, 'str') - if permissions is not None: - _header_parameters['x-ms-permissions'] = _SERIALIZER.header("permissions", permissions, 'str') - if acl is not None: - _header_parameters['x-ms-acl'] = _SERIALIZER.header("acl", acl, 'str') - if if_match is not None: - _header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') - if if_none_match is not None: - _header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PATCH", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_set_access_control_recursive_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - action = kwargs.pop('action', "setAccessControlRecursive") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - mode = kwargs.pop('mode') # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - continuation = kwargs.pop('continuation', None) # type: Optional[str] - force_flag = kwargs.pop('force_flag', None) # type: Optional[bool] - max_records = kwargs.pop('max_records', None) # type: Optional[int] - acl = kwargs.pop('acl', None) # type: Optional[str] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['action'] = _SERIALIZER.query("action", action, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - if continuation is not None: - _query_parameters['continuation'] = _SERIALIZER.query("continuation", continuation, 'str') - _query_parameters['mode'] = _SERIALIZER.query("mode", mode, 'str') - if force_flag is not None: - _query_parameters['forceFlag'] = _SERIALIZER.query("force_flag", force_flag, 'bool') - if max_records is not None: - _query_parameters['maxRecords'] = _SERIALIZER.query("max_records", max_records, 'int', minimum=1) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if acl is not None: - _header_parameters['x-ms-acl'] = _SERIALIZER.header("acl", acl, 'str') - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PATCH", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_flush_data_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - action = kwargs.pop('action', "flush") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - timeout = kwargs.pop('timeout', None) # type: Optional[int] - position = kwargs.pop('position', None) # type: Optional[int] - retain_uncommitted_data = kwargs.pop('retain_uncommitted_data', None) # type: Optional[bool] - close = kwargs.pop('close', None) # type: Optional[bool] - content_length = kwargs.pop('content_length', None) # type: Optional[int] - content_md5 = kwargs.pop('content_md5', None) # type: Optional[bytearray] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - cache_control = kwargs.pop('cache_control', None) # type: Optional[str] - content_type_parameter = kwargs.pop('content_type_parameter', None) # type: Optional[str] - content_disposition = kwargs.pop('content_disposition', None) # type: Optional[str] - content_encoding = kwargs.pop('content_encoding', None) # type: Optional[str] - content_language = kwargs.pop('content_language', None) # type: Optional[str] - if_match = kwargs.pop('if_match', None) # type: Optional[str] - if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] - if_modified_since = kwargs.pop('if_modified_since', None) # type: Optional[datetime.datetime] - if_unmodified_since = kwargs.pop('if_unmodified_since', None) # type: Optional[datetime.datetime] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - encryption_key = kwargs.pop('encryption_key', None) # type: Optional[str] - encryption_key_sha256 = kwargs.pop('encryption_key_sha256', None) # type: Optional[str] - encryption_algorithm = kwargs.pop('encryption_algorithm', "AES256") # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['action'] = _SERIALIZER.query("action", action, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - if position is not None: - _query_parameters['position'] = _SERIALIZER.query("position", position, 'long') - if retain_uncommitted_data is not None: - _query_parameters['retainUncommittedData'] = _SERIALIZER.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') - if close is not None: - _query_parameters['close'] = _SERIALIZER.query("close", close, 'bool') - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if content_length is not None: - _header_parameters['Content-Length'] = _SERIALIZER.header("content_length", content_length, 'long', minimum=0) - if content_md5 is not None: - _header_parameters['x-ms-content-md5'] = _SERIALIZER.header("content_md5", content_md5, 'bytearray') - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if cache_control is not None: - _header_parameters['x-ms-cache-control'] = _SERIALIZER.header("cache_control", cache_control, 'str') - if content_type_parameter is not None: - _header_parameters['x-ms-content-type'] = _SERIALIZER.header("content_type_parameter", content_type_parameter, 'str') - if content_disposition is not None: - _header_parameters['x-ms-content-disposition'] = 
_SERIALIZER.header("content_disposition", content_disposition, 'str') - if content_encoding is not None: - _header_parameters['x-ms-content-encoding'] = _SERIALIZER.header("content_encoding", content_encoding, 'str') - if content_language is not None: - _header_parameters['x-ms-content-language'] = _SERIALIZER.header("content_language", content_language, 'str') - if if_match is not None: - _header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') - if if_none_match is not None: - _header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - _header_parameters['If-Modified-Since'] = _SERIALIZER.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - _header_parameters['If-Unmodified-Since'] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if encryption_key is not None: - _header_parameters['x-ms-encryption-key'] = _SERIALIZER.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - _header_parameters['x-ms-encryption-key-sha256'] = _SERIALIZER.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - _header_parameters['x-ms-encryption-algorithm'] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PATCH", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_append_data_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - action = kwargs.pop('action', "append") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - content_type = kwargs.pop('content_type', None) # type: Optional[str] - position = kwargs.pop('position', None) # type: Optional[int] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - content_length = kwargs.pop('content_length', None) # type: Optional[int] - transactional_content_hash = kwargs.pop('transactional_content_hash', None) # type: Optional[bytearray] - transactional_content_crc64 = kwargs.pop('transactional_content_crc64', None) # type: Optional[bytearray] - lease_id = kwargs.pop('lease_id', None) # type: Optional[str] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - encryption_key = kwargs.pop('encryption_key', None) # type: Optional[str] - encryption_key_sha256 = kwargs.pop('encryption_key_sha256', None) # type: Optional[str] - encryption_algorithm = kwargs.pop('encryption_algorithm', "AES256") # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['action'] = _SERIALIZER.query("action", action, 'str') - if position is not None: - _query_parameters['position'] = _SERIALIZER.query("position", position, 'long') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if content_length is not None: - _header_parameters['Content-Length'] = _SERIALIZER.header("content_length", content_length, 'long', minimum=0) - if transactional_content_hash is not None: - _header_parameters['Content-MD5'] = _SERIALIZER.header("transactional_content_hash", transactional_content_hash, 'bytearray') - if transactional_content_crc64 is not None: - _header_parameters['x-ms-content-crc64'] = _SERIALIZER.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') - if lease_id is not None: - _header_parameters['x-ms-lease-id'] = _SERIALIZER.header("lease_id", lease_id, 'str') - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if encryption_key is not None: - _header_parameters['x-ms-encryption-key'] = _SERIALIZER.header("encryption_key", encryption_key, 'str') - if encryption_key_sha256 is not None: - _header_parameters['x-ms-encryption-key-sha256'] = _SERIALIZER.header("encryption_key_sha256", encryption_key_sha256, 'str') - if encryption_algorithm is not None: - _header_parameters['x-ms-encryption-algorithm'] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, 'str') - if content_type is not None: - _header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PATCH", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_set_expiry_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) 
-> HttpRequest - comp = kwargs.pop('comp', "expiry") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - expiry_options = kwargs.pop('expiry_options') # type: Union[str, "_models.PathExpiryOptions"] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - expires_on = kwargs.pop('expires_on', None) # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['comp'] = _SERIALIZER.query("comp", comp, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-expiry-option'] = _SERIALIZER.header("expiry_options", expiry_options, 'str') - if expires_on is not None: - _header_parameters['x-ms-expiry-time'] = _SERIALIZER.header("expires_on", expires_on, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PUT", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - - -def build_undelete_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) -> HttpRequest - comp = kwargs.pop('comp', "undelete") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - timeout = kwargs.pop('timeout', None) # type: Optional[int] - undelete_source = kwargs.pop('undelete_source', None) # type: Optional[str] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}/{filesystem}/{path}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['comp'] = _SERIALIZER.query("comp", comp, 'str') - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if undelete_source is not None: - _header_parameters['x-ms-undelete-source'] = _SERIALIZER.header("undelete_source", undelete_source, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="PUT", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - -# fmt: on -class PathOperations(object): - """ - .. 
warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s - :attr:`path` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - args = list(args) - self._client = args.pop(0) if args else kwargs.pop("client") - self._config = args.pop(0) if args else kwargs.pop("config") - self._serialize = args.pop(0) if args else kwargs.pop("serializer") - self._deserialize = args.pop(0) if args else kwargs.pop("deserializer") - - - @distributed_trace - def create( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - resource=None, # type: Optional[Union[str, "_models.PathResourceType"]] - continuation=None, # type: Optional[str] - mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] - rename_source=None, # type: Optional[str] - source_lease_id=None, # type: Optional[str] - properties=None, # type: Optional[str] - permissions=None, # type: Optional[str] - umask=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """Create File | Create Directory | Rename File | Rename Directory. - - Create or rename a file or directory. By default, the destination is overwritten and if the - destination already exists and has a lease the lease is broken. This operation supports - conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob - Service Operations - `_. - To fail if the destination already exists, use a conditional request with If-None-Match: "*". - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param resource: Required only for Create File and Create Directory. The value must be "file" - or "directory". Default value is None. - :type resource: str or ~azure.storage.filedatalake.models.PathResourceType - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param mode: Optional. Valid only when namespace is enabled. This parameter determines the - behavior of the rename operation. The value must be "legacy" or "posix", and the default value - will be "posix". Default value is None. 
- :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode - :param rename_source: An optional file or directory to be renamed. The value must have the - following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties - will overwrite the existing properties; otherwise, the existing properties will be preserved. - This value must be a URL percent-encoded string. Note that the string may only contain ASCII - characters in the ISO-8859-1 character set. Default value is None. - :type rename_source: str - :param source_lease_id: A lease ID for the source path. If specified, the source path must have - an active lease and the lease ID must match. Default value is None. - :type source_lease_id: str - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. Default value is - None. - :type properties: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - Default value is None. - :type permissions: str - :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, the umask - restricts the permissions of the file or directory to be created. The resulting permission is - given by p bitwise and not u, where p is the permission and u is the umask. For example, if p - is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 - for a directory and 0666 for a file. The default umask is 0027. The umask must be specified - in 4-digit octal notation (e.g. 0766). Default value is None. - :type umask: str - :param path_http_headers: Parameter group. Default value is None. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param source_modified_access_conditions: Parameter group. Default value is None. - :type source_modified_access_conditions: - ~azure.storage.filedatalake.models.SourceModifiedAccessConditions - :param cpk_info: Parameter group. Default value is None. 
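# Illustrative sketch, not part of the generated module or of this diff: one way the
# create() operation documented above might be invoked through the client's `path`
# operations group. The helper names and the `path_ops` variable (a PathOperations
# instance) are assumptions for illustration only.
def make_directory(path_ops, correlation_id=None):
    # Create a directory; resource must be "file" or "directory" for a create call.
    path_ops.create(
        resource="directory",
        permissions="rwxr-x---",   # optional POSIX permissions (HNS accounts only)
        umask="0027",              # applied when the parent folder has no default ACL
        request_id_parameter=correlation_id,
        timeout=30,
    )

def rename_path(path_ops, source_path):
    # Rename by passing a percent-encoded "/{filesystem}/{path}" source and a mode.
    path_ops.create(mode="posix", rename_source=source_path)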
- :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _cache_control = None - _content_encoding = None - _content_language = None - _content_disposition = None - _content_type_parameter = None - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _source_if_match = None - _source_if_none_match = None - _source_if_modified_since = None - _source_if_unmodified_since = None - _encryption_key = None - _encryption_key_sha256 = None - encryption_algorithm = None - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - _content_disposition = path_http_headers.content_disposition - _content_type_parameter = path_http_headers.content_type - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if source_modified_access_conditions is not None: - _source_if_match = source_modified_access_conditions.source_if_match - _source_if_none_match = source_modified_access_conditions.source_if_none_match - _source_if_modified_since = source_modified_access_conditions.source_if_modified_since - _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = cpk_info.encryption_algorithm - - request = build_create_request( - url=self._config.url, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - resource=resource, - continuation=continuation, - mode=mode, - cache_control=_cache_control, - content_encoding=_content_encoding, - content_language=_content_language, - content_disposition=_content_disposition, - content_type_parameter=_content_type_parameter, - rename_source=rename_source, - lease_id=_lease_id, - source_lease_id=source_lease_id, - properties=properties, - permissions=permissions, - umask=umask, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - source_if_match=_source_if_match, - source_if_none_match=_source_if_none_match, - source_if_modified_since=_source_if_modified_since, - source_if_unmodified_since=_source_if_unmodified_since, - encryption_key=_encryption_key, - encryption_key_sha256=_encryption_key_sha256, - encryption_algorithm=encryption_algorithm, - template_url=self.create.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = 
pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def update( - self, - action, # type: Union[str, "_models.PathUpdateAction"] - mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - body, # type: IO - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - max_records=None, # type: Optional[int] - continuation=None, # type: Optional[str] - force_flag=None, # type: Optional[bool] - position=None, # type: Optional[int] - retain_uncommitted_data=None, # type: Optional[bool] - close=None, # type: Optional[bool] - content_length=None, # type: Optional[int] - properties=None, # type: Optional[str] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> Optional["_models.SetAccessControlRecursiveResponse"] - """Append Data | Flush Data | Set Properties | Set Access Control. - - Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, - sets properties for a file or directory, or sets access control for a file or directory. Data - can only be appended to a file. Concurrent writes to the same file using multiple clients are - not supported. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations - `_. - - :param action: The action must be "append" to upload data to be appended to a file, "flush" to - flush previously uploaded data to a file, "setProperties" to set the properties of a file or - directory, "setAccessControl" to set the owner, group, permissions, or access control list for - a file or directory, or "setAccessControlRecursive" to set the access control list for a - directory recursively. 
Note that Hierarchical Namespace must be enabled for the account in - order to use access control. Also note that the Access Control List (ACL) includes permissions - for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers - are mutually exclusive. - :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param body: Initial data. - :type body: IO - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the - maximum number of files or directories on which the acl change will be applied. If omitted or - greater than 2,000, the request will process up to 2,000 items. Default value is None. - :type max_records: int - :param continuation: Optional. The number of paths processed with each invocation is limited. - If the number of paths to be processed exceeds this limit, a continuation token is returned in - the response header x-ms-continuation. When a continuation token is returned in the response, - it must be percent-encoded and specified in a subsequent invocation of - setAccessControlRecursive operation. Default value is None. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set the default value is false for this. - :type force_flag: bool - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. Default value - is None. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. 
Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". Default value is - None. - :type content_length: long - :param properties: Optional. User-defined properties to be stored with the filesystem, in the - format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value - is a base64 encoded string. Note that the string may only contain ASCII characters in the - ISO-8859-1 character set. If the filesystem exists, any properties not included in the list - will be removed. All properties are removed if the header is omitted. To merge new and - existing properties, first get all existing properties and the current E-Tag, then make a - conditional request with the E-Tag and include values for all properties. Default value is - None. - :type properties: str - :param owner: Optional. The owner of the blob or directory. Default value is None. - :type owner: str - :param group: Optional. The owning group of the blob or directory. Default value is None. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - Default value is None. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". Default value is None. - :type acl: str - :param path_http_headers: Parameter group. Default value is None. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - content_type = kwargs.pop('content_type', "application/octet-stream") # type: Optional[str] - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type_parameter = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_type_parameter = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - _content = body - - request = build_update_request( - url=self._config.url, - version=self._config.version, - content_type=content_type, - content=_content, - action=action, - mode=mode, - request_id_parameter=request_id_parameter, - timeout=timeout, - max_records=max_records, - continuation=continuation, - force_flag=force_flag, - position=position, - retain_uncommitted_data=retain_uncommitted_data, - close=close, - content_length=content_length, - content_md5=_content_md5, - lease_id=_lease_id, - cache_control=_cache_control, - content_type_parameter=_content_type_parameter, - content_disposition=_content_disposition, - content_encoding=_content_encoding, - content_language=_content_language, - properties=properties, - owner=owner, - group=group, - permissions=permissions, - acl=acl, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.update.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = None - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if response.status_code == 202: - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - update.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def lease( # pylint: disable=inconsistent-return-statements - self, - x_ms_lease_action, # type: Union[str, "_models.PathLeaseAction"] - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - x_ms_lease_duration=None, # type: Optional[int] - x_ms_lease_break_period=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Lease Path. - - Create and manage a lease to restrict write and delete access to the path. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations - `_. - - :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", - and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" - to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the - lease break period is allowed to elapse, during which time no lease operation except break and - release can be performed on the file. 
When a lease is successfully broken, the response - indicates the interval in seconds until a new lease can be acquired. Use "change" and specify - the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to - change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an - existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. - :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies - the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or - -1 for infinite lease. Default value is None. - :type x_ms_lease_duration: int - :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, - and specifies the break period of the lease in seconds. The lease break duration must be - between 0 and 60 seconds. Default value is None. - :type x_ms_lease_break_period: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. Default value is None. - :type proposed_lease_id: str - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. 
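# Illustrative sketch, not part of the generated module or of this diff: acquiring a
# lease with the lease() operation documented above. `path_ops` is an assumed
# PathOperations instance; the helper name is hypothetical.
import uuid

def acquire_path_lease(path_ops, duration=60):
    # "acquire" takes a proposed lease ID and a duration of 15-60 seconds (-1 = infinite).
    proposed = str(uuid.uuid4())
    path_ops.lease(
        "acquire",
        x_ms_lease_duration=duration,
        proposed_lease_id=proposed,
    )
    return proposed  # the service also echoes the lease ID in the x-ms-lease-id header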
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_lease_request( - url=self._config.url, - version=self._config.version, - x_ms_lease_action=x_ms_lease_action, - request_id_parameter=request_id_parameter, - timeout=timeout, - x_ms_lease_duration=x_ms_lease_duration, - x_ms_lease_break_period=x_ms_lease_break_period, - lease_id=_lease_id, - proposed_lease_id=proposed_lease_id, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.lease.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200, 201, 202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - - if response.status_code == 201: - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - - - if response.status_code == 202: - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - lease.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def read( - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - x_ms_range_get_content_md5=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - **kwargs # type: Any - ): - # type: (...) -> IO - """Read File. - - Read the contents of a file. For read operations, range requests are supported. This operation - supports conditional HTTP requests. For more information, see `Specifying Conditional Headers - for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param range: The HTTP Range request header specifies one or more byte ranges of the resource - to be retrieved. Default value is None. - :type range: str - :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified - together with the Range header, the service returns the MD5 hash for the range, as long as the - range is less than or equal to 4MB in size. If this header is specified without the Range - header, the service returns status code 400 (Bad Request). If this header is set to true when - the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). Default - value is None. - :type x_ms_range_get_content_md5: bool - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param cpk_info: Parameter group. Default value is None. 
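# Illustrative sketch, not part of the generated module or of this diff: a ranged read
# using the read() operation above. `path_ops` is an assumed PathOperations instance;
# the return value is treated as an iterable of byte chunks, as produced by
# stream_download in the response handling below.
def read_first_kib(path_ops):
    stream = path_ops.read(
        range="bytes=0-1023",
        x_ms_range_get_content_md5=True,   # only allowed for ranges of 4 MB or less
    )
    return b"".join(stream)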
- :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _encryption_key = None - _encryption_key_sha256 = None - encryption_algorithm = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = cpk_info.encryption_algorithm - - request = build_read_request( - url=self._config.url, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - range=range, - lease_id=_lease_id, - x_ms_range_get_content_md5=x_ms_range_get_content_md5, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - encryption_key=_encryption_key, - encryption_key_sha256=_encryption_key_sha256, - encryption_algorithm=encryption_algorithm, - template_url=self.read.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=True, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - deserialized = 
response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - read.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def get_properties( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - action=None, # type: Optional[Union[str, "_models.PathGetPropertiesAction"]] - upn=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Get Properties | Get Status | Get Access Control List. - - Get Properties returns all system and user defined properties for a path. Get Status returns - all system defined properties for a path. Get Access Control List returns the access control - list for a path. This operation supports conditional HTTP requests. For more information, see - `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param action: Optional. If the value is "getStatus" only the system defined properties for the - path are returned. If the value is "getAccessControl" the access control list is returned in - the response headers (Hierarchical Namespace must be enabled for the account), otherwise the - properties are returned. Default value is None. - :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction - :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If - "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response - headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If - "false", the values will be returned as Azure Active Directory Object IDs. The default value is - false. Note that group and application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. 
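# Illustrative sketch, not part of the generated module or of this diff: reading the
# ACL with get_properties(). The operation itself returns None, so header values such
# as x-ms-acl are retrieved through the documented `cls` response hook; `path_ops` and
# the helper name are assumptions.
def get_access_control(path_ops):
    def _return_headers(pipeline_response, deserialized, headers):
        return headers

    headers = path_ops.get_properties(action="getAccessControl", upn=True, cls=_return_headers)
    return headers.get("x-ms-acl")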
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_get_properties_request( - url=self._config.url, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - action=action, - upn=upn, - lease_id=_lease_id, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.get_properties.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - 
response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) - response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) - response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) - response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) - response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def delete( # pylint: disable=inconsistent-return-statements - self, - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - recursive=None, # type: Optional[bool] - continuation=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Delete File | Delete Directory. - - Delete the file or directory. This operation supports conditional HTTP requests. For more - information, see `Specifying Conditional Headers for Blob Service Operations - `_. - - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param recursive: Required. Default value is None. - :type recursive: bool - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. 
- :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_delete_request( - url=self._config.url, - version=self._config.version, - request_id_parameter=request_id_parameter, - timeout=timeout, - recursive=recursive, - continuation=continuation, - lease_id=_lease_id, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - template_url=self.delete.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def set_access_control( # pylint: disable=inconsistent-return-statements - self, - timeout=None, # type: Optional[int] - owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param owner: Optional. The owner of the blob or directory. 
Default value is None. - :type owner: str - :param group: Optional. The owning group of the blob or directory. Default value is None. - :type group: str - :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the - account. Sets POSIX access permissions for the file owner, the file owning group, and others. - Each class may be granted read, write, or execute permission. The sticky bit is also - supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. - Default value is None. - :type permissions: str - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". Default value is None. - :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :keyword action: action. Default value is "setAccessControl". Note that overriding this default - value may result in unsupported behavior. - :paramtype action: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - action = kwargs.pop('action', "setAccessControl") # type: str - - _lease_id = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - - request = build_set_access_control_request( - url=self._config.url, - action=action, - version=self._config.version, - timeout=timeout, - lease_id=_lease_id, - owner=owner, - group=group, - permissions=permissions, - acl=acl, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - request_id_parameter=request_id_parameter, - template_url=self.set_access_control.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_control.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def set_access_control_recursive( - self, - mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] - timeout=None, # type: Optional[int] - continuation=None, # type: Optional[str] - force_flag=None, # type: Optional[bool] - max_records=None, # type: Optional[int] - acl=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.SetAccessControlRecursiveResponse" - """Set the access control list for a path and sub-paths. - - :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" - modifies one or more POSIX access control rights that pre-exist on files and directories, - "remove" removes one or more POSIX access control rights that were present earlier on files - and directories. - :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param continuation: Optional. When setting the access control list recursively on a directory, - the number of paths that are processed with each invocation is limited. If the number of paths - to be processed exceeds this limit, a continuation token is returned in this response header. - When a continuation token is returned in the response, it must be specified in a subsequent - invocation of this operation to continue applying the access control list to the directory. - Default value is None. - :type continuation: str - :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, - the operation will terminate quickly on encountering user errors (4XX). If true, the operation - will ignore user errors and proceed with the operation on other sub-entities of the directory. - Continuation token will only be returned when forceFlag is true in case of user errors. If not - set, the default value is false. - :type force_flag: bool - :param max_records: Optional. It specifies the maximum number of files or directories on which - the acl change will be applied. If omitted or greater than 2,000, the request will process up - to 2,000 items. Default value is None. - :type max_records: int - :param acl: Sets POSIX access control rights on files and directories. The value is a - comma-separated list of access control entries. Each access control entry (ACE) consists of a - scope, a type, a user or group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". Default value is None.
- :type acl: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :keyword action: action. Default value is "setAccessControlRecursive". Note that overriding - this default value may result in unsupported behavior. - :paramtype action: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SetAccessControlRecursiveResponse, or the result of cls(response) - :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - action = kwargs.pop('action', "setAccessControlRecursive") # type: str - - - request = build_set_access_control_recursive_request( - url=self._config.url, - action=action, - version=self._config.version, - mode=mode, - timeout=timeout, - continuation=continuation, - force_flag=force_flag, - max_records=max_records, - acl=acl, - request_id_parameter=request_id_parameter, - template_url=self.set_access_control_recursive.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - - set_access_control_recursive.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def flush_data( # pylint: disable=inconsistent-return-statements - self, - timeout=None, # type: Optional[int] - position=None, # type: Optional[int] - retain_uncommitted_data=None, # type: Optional[bool] - close=None, # type: Optional[bool] - content_length=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - **kwargs # type: Any - ): - # type: (...) 
-> None - """Set the owner, group, permissions, or access control list for a path. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. Default value - is None. - :type position: long - :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data - is retained after the flush operation completes; otherwise, the uncommitted data is deleted - after the flush operation. The default is false. Data at offsets less than the specified - position are written to the file when flush succeeds, but this optional parameter allows data - after the flush position to be retained for a future flush operation. - :type retain_uncommitted_data: bool - :param close: Azure Storage Events allow applications to receive notifications when files - change. When Azure Storage Events are enabled, a file changed event is raised. This event has a - property indicating whether this is the final change to distinguish the difference between an - intermediate flush to a file stream and the final close of a file stream. The close query - parameter is valid only when the action is "flush" and change notifications are enabled. If the - value of close is "true" and the flush operation completes successfully, the service raises a - file change notification with a property indicating that this is the final update (the file - stream has been closed). If "false" a change notification is raised indicating the file has - changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to - indicate that the file stream has been closed.". - :type close: bool - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". Default value is - None. - :type content_length: long - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param path_http_headers: Parameter group. Default value is None. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param modified_access_conditions: Parameter group. Default value is None. - :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions - :param cpk_info: Parameter group. Default value is None. - :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo - :keyword action: action. Default value is "flush". 
Note that overriding this default value may - result in unsupported behavior. - :paramtype action: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - action = kwargs.pop('action', "flush") # type: str - - _content_md5 = None - _lease_id = None - _cache_control = None - _content_type_parameter = None - _content_disposition = None - _content_encoding = None - _content_language = None - _if_match = None - _if_none_match = None - _if_modified_since = None - _if_unmodified_since = None - _encryption_key = None - _encryption_key_sha256 = None - encryption_algorithm = None - if path_http_headers is not None: - _content_md5 = path_http_headers.content_md5 - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if path_http_headers is not None: - _cache_control = path_http_headers.cache_control - _content_type_parameter = path_http_headers.content_type - _content_disposition = path_http_headers.content_disposition - _content_encoding = path_http_headers.content_encoding - _content_language = path_http_headers.content_language - if modified_access_conditions is not None: - _if_match = modified_access_conditions.if_match - _if_none_match = modified_access_conditions.if_none_match - _if_modified_since = modified_access_conditions.if_modified_since - _if_unmodified_since = modified_access_conditions.if_unmodified_since - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = cpk_info.encryption_algorithm - - request = build_flush_data_request( - url=self._config.url, - action=action, - version=self._config.version, - timeout=timeout, - position=position, - retain_uncommitted_data=retain_uncommitted_data, - close=close, - content_length=content_length, - content_md5=_content_md5, - lease_id=_lease_id, - cache_control=_cache_control, - content_type_parameter=_content_type_parameter, - content_disposition=_content_disposition, - content_encoding=_content_encoding, - content_language=_content_language, - if_match=_if_match, - if_none_match=_if_none_match, - if_modified_since=_if_modified_since, - if_unmodified_since=_if_unmodified_since, - request_id_parameter=request_id_parameter, - encryption_key=_encryption_key, - encryption_key_sha256=_encryption_key_sha256, - encryption_algorithm=encryption_algorithm, - template_url=self.flush_data.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - flush_data.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def append_data( # pylint: disable=inconsistent-return-statements - self, - body, # type: IO - position=None, # type: Optional[int] - timeout=None, # type: Optional[int] - content_length=None, # type: Optional[int] - transactional_content_crc64=None, # type: Optional[bytearray] - request_id_parameter=None, # type: Optional[str] - path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - cpk_info=None, # type: Optional["_models.CpkInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """Append data to the file. - - :param body: Initial data. - :type body: IO - :param position: This parameter allows the caller to upload data in parallel and control the - order in which it is appended to the file. It is required when uploading data to be appended - to the file and when flushing previously uploaded data to the file. The value must be the - position where the data is to be appended. Uploaded data is not immediately flushed, or - written, to the file. To flush, the previously uploaded data must be contiguous, the position - parameter must be specified and equal to the length of the file after all data has been - written, and there must not be a request entity body included with the request. Default value - is None. - :type position: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush - Data". Must be the length of the request content in bytes for "Append Data". Default value is - None. - :type content_length: long - :param transactional_content_crc64: Specify the transactional crc64 for the body, to be - validated by the service. Default value is None. - :type transactional_content_crc64: bytearray - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param path_http_headers: Parameter group. Default value is None. - :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders - :param lease_access_conditions: Parameter group. Default value is None. - :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions - :param cpk_info: Parameter group. Default value is None. 
- :type cpk_info: ~azure.storage.filedatalake.models.CpkInfo - :keyword action: action. Default value is "append". Note that overriding this default value may - result in unsupported behavior. - :paramtype action: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - action = kwargs.pop('action', "append") # type: str - content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] - - _transactional_content_hash = None - _lease_id = None - _encryption_key = None - _encryption_key_sha256 = None - encryption_algorithm = None - if path_http_headers is not None: - _transactional_content_hash = path_http_headers.transactional_content_hash - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if cpk_info is not None: - _encryption_key = cpk_info.encryption_key - _encryption_key_sha256 = cpk_info.encryption_key_sha256 - encryption_algorithm = cpk_info.encryption_algorithm - _content = body - - request = build_append_data_request( - url=self._config.url, - action=action, - version=self._config.version, - content_type=content_type, - content=_content, - position=position, - timeout=timeout, - content_length=content_length, - transactional_content_hash=_transactional_content_hash, - transactional_content_crc64=transactional_content_crc64, - lease_id=_lease_id, - request_id_parameter=request_id_parameter, - encryption_key=_encryption_key, - encryption_key_sha256=_encryption_key_sha256, - encryption_algorithm=encryption_algorithm, - template_url=self.append_data.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - 
append_data.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def set_expiry( # pylint: disable=inconsistent-return-statements - self, - expiry_options, # type: Union[str, "_models.PathExpiryOptions"] - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - expires_on=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets the time a blob will expire and be deleted. - - :param expiry_options: Required. Indicates mode of the expiry time. - :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param expires_on: The time to set the blob to expiry. Default value is None. - :type expires_on: str - :keyword comp: comp. Default value is "expiry". Note that overriding this default value may - result in unsupported behavior. - :paramtype comp: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - comp = kwargs.pop('comp', "expiry") # type: str - - - request = build_set_expiry_request( - url=self._config.url, - comp=comp, - version=self._config.version, - expiry_options=expiry_options, - timeout=timeout, - request_id_parameter=request_id_parameter, - expires_on=expires_on, - template_url=self.set_expiry.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - set_expiry.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - - - @distributed_trace - def undelete( # pylint: disable=inconsistent-return-statements - self, - timeout=None, # type: Optional[int] - undelete_source=None, # 
type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Undelete a path that was previously soft deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of - the soft deleted blob to undelete. Default value is None. - :type undelete_source: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :keyword comp: comp. Default value is "undelete". Note that overriding this default value may - result in unsupported behavior. - :paramtype comp: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - comp = kwargs.pop('comp', "undelete") # type: str - - - request = build_undelete_request( - url=self._config.url, - comp=comp, - version=self._config.version, - timeout=timeout, - undelete_source=undelete_source, - request_id_parameter=request_id_parameter, - template_url=self.undelete.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - - if cls: - return cls(pipeline_response, None, response_headers) - - undelete.metadata = {'url': "{url}/{filesystem}/{path}"} # type: ignore - diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_service_operations.py deleted file mode 100644 index 1154d22..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_generated/operations/_service_operations.py +++ /dev/null @@ -1,218 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING - -from msrest import Serializer - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest -from azure.core.tracing.decorator import distributed_trace - -from .. import models as _models -from .._vendor import _convert_request, _format_url_section - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Iterable, Optional, TypeVar - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False -# fmt: off - -def build_list_file_systems_request( - url, # type: str - **kwargs # type: Any -): - # type: (...) -> HttpRequest - resource = kwargs.pop('resource', "account") # type: str - version = kwargs.pop('version', "2021-06-08") # type: str - prefix = kwargs.pop('prefix', None) # type: Optional[str] - continuation = kwargs.pop('continuation', None) # type: Optional[str] - max_results = kwargs.pop('max_results', None) # type: Optional[int] - request_id_parameter = kwargs.pop('request_id_parameter', None) # type: Optional[str] - timeout = kwargs.pop('timeout', None) # type: Optional[int] - - accept = "application/json" - # Construct URL - _url = kwargs.pop("template_url", "{url}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, 'str', skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] - _query_parameters['resource'] = _SERIALIZER.query("resource", resource, 'str') - if prefix is not None: - _query_parameters['prefix'] = _SERIALIZER.query("prefix", prefix, 'str') - if continuation is not None: - _query_parameters['continuation'] = _SERIALIZER.query("continuation", continuation, 'str') - if max_results is not None: - _query_parameters['maxResults'] = _SERIALIZER.query("max_results", max_results, 'int', minimum=1) - if timeout is not None: - _query_parameters['timeout'] = _SERIALIZER.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - _header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] - if request_id_parameter is not None: - _header_parameters['x-ms-client-request-id'] = _SERIALIZER.header("request_id_parameter", request_id_parameter, 'str') - _header_parameters['x-ms-version'] = _SERIALIZER.header("version", version, 'str') - _header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') - - return HttpRequest( - method="GET", - url=_url, - params=_query_parameters, - headers=_header_parameters, - **kwargs - ) - -# fmt: on -class ServiceOperations(object): - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.filedatalake.AzureDataLakeStorageRESTAPI`'s - :attr:`service` attribute. 
- """ - - models = _models - - def __init__(self, *args, **kwargs): - args = list(args) - self._client = args.pop(0) if args else kwargs.pop("client") - self._config = args.pop(0) if args else kwargs.pop("config") - self._serialize = args.pop(0) if args else kwargs.pop("serializer") - self._deserialize = args.pop(0) if args else kwargs.pop("deserializer") - - - @distributed_trace - def list_file_systems( - self, - prefix=None, # type: Optional[str] - continuation=None, # type: Optional[str] - max_results=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Iterable["_models.FileSystemList"] - """List FileSystems. - - List filesystems and their properties in given account. - - :param prefix: Filters results to filesystems within the specified prefix. Default value is - None. - :type prefix: str - :param continuation: Optional. When deleting a directory, the number of paths that are deleted - with each invocation is limited. If the number of paths to be deleted exceeds this limit, a - continuation token is returned in this response header. When a continuation token is returned - in the response, it must be specified in a subsequent invocation of the delete operation to - continue deleting the directory. Default value is None. - :type continuation: str - :param max_results: An optional value that specifies the maximum number of items to return. If - omitted or greater than 5,000, the response will include up to 5,000 items. Default value is - None. - :type max_results: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. Default - value is None. - :type request_id_parameter: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for Blob Service Operations.`. Default value is None. - :type timeout: int - :keyword resource: The value must be "account" for all account operations. Default value is - "account". Note that overriding this default value may result in unsupported behavior. 
- :paramtype resource: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: An iterator like instance of either FileSystemList or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.FileSystemList] - :raises: ~azure.core.exceptions.HttpResponseError - """ - resource = kwargs.pop('resource', "account") # type: str - - cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - def prepare_request(next_link=None): - if not next_link: - - request = build_list_file_systems_request( - url=self._config.url, - resource=resource, - version=self._config.version, - prefix=prefix, - continuation=continuation, - max_results=max_results, - request_id_parameter=request_id_parameter, - timeout=timeout, - template_url=self.list_file_systems.metadata['url'], - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - - else: - - request = build_list_file_systems_request( - url=self._config.url, - resource=resource, - version=self._config.version, - prefix=prefix, - continuation=continuation, - max_results=max_results, - request_id_parameter=request_id_parameter, - timeout=timeout, - template_url=next_link, - ) - request = _convert_request(request) - request.url = self._client.format_url(request.url) - request.method = "GET" - return request - - def extract_data(pipeline_response): - deserialized = self._deserialize("FileSystemList", pipeline_response) - list_of_elem = deserialized.filesystems - if cls: - list_of_elem = cls(list_of_elem) - return None, iter(list_of_elem) - - def get_next(next_link=None): - request = prepare_request(next_link) - - pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access - request, - stream=False, - **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - - return ItemPaged( - get_next, extract_data - ) - list_file_systems.metadata = {'url': "{url}"} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_list_paths_helper.py deleted file mode 100644 index e5cb8f6..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_list_paths_helper.py +++ /dev/null @@ -1,173 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code, \ - return_headers_and_deserialized_path_list -from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix -from ._shared.models import DictMixin -from ._shared.response_handlers import return_context_and_deserialized -from ._models import PathProperties -from ._generated.models import Path - - -class DeletedPathPropertiesPaged(PageIterator): - """An Iterable of deleted path properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A path name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties) - :ivar str container: The container that the paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(DeletedPathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - def _get_next_cb(self, continuation_token): - try: - return self._command( - prefix=self.prefix, - marker=continuation_token or None, - max_results=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobItemInternal): - file_props = get_deleted_path_properties_from_generated_code(item) - file_props.file_system = self.container - return file_props - if isinstance(item, GenBlobPrefix): - return DirectoryPrefix( - container=self.container, - prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class DirectoryPrefix(DictMixin): - """Directory prefix. 
- - :ivar str name: Name of the deleted directory. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar str file_system: The file system that the deleted paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.file_system = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class PathPropertiesPaged(PageIterator): - """An Iterable of Path properties. - - :ivar str path: Filters the results to return only paths under the specified path. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results. - - :param callable command: Function to retrieve the next page of items. - :param str path: Filters the results to return only paths under the specified path. - :param int max_results: The maximum number of paths to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__( - self, command, - recursive, - path=None, - max_results=None, - continuation_token=None, - upn=None): - super(PathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.recursive = recursive - self.results_per_page = max_results - self.path = path - self.upn = upn - self.current_page = None - self.path_list = None - - def _get_next_cb(self, continuation_token): - try: - return self._command( - self.recursive, - continuation=continuation_token or None, - path=self.path, - max_results=self.results_per_page, - upn=self.upn, - cls=return_headers_and_deserialized_path_list) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.path_list, self._response = get_next_return - self.current_page = [self._build_item(item) for item in self.path_list] - - return self._response['continuation'] or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, PathProperties): - return item - if isinstance(item, Path): - path = PathProperties._from_generated(item) # pylint: disable=protected-access - return path - return item diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_models.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_models.py deleted file mode 100644 index d3b43c2..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_models.py +++ /dev/null @@ -1,1091 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information.
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from enum import Enum - -from azure.core import CaseInsensitiveEnumMeta -from azure.multiapi.storagev2.blob.v2021_06_08 import LeaseProperties as BlobLeaseProperties -from azure.multiapi.storagev2.blob.v2021_06_08 import AccountSasPermissions as BlobAccountSasPermissions -from azure.multiapi.storagev2.blob.v2021_06_08 import ResourceTypes as BlobResourceTypes -from azure.multiapi.storagev2.blob.v2021_06_08 import UserDelegationKey as BlobUserDelegationKey -from azure.multiapi.storagev2.blob.v2021_06_08 import ContentSettings as BlobContentSettings -from azure.multiapi.storagev2.blob.v2021_06_08 import AccessPolicy as BlobAccessPolicy -from azure.multiapi.storagev2.blob.v2021_06_08 import DelimitedTextDialect as BlobDelimitedTextDialect -from azure.multiapi.storagev2.blob.v2021_06_08 import DelimitedJsonDialect as BlobDelimitedJSON -from azure.multiapi.storagev2.blob.v2021_06_08 import ArrowDialect as BlobArrowDialect -from azure.multiapi.storagev2.blob.v2021_06_08 import CustomerProvidedEncryptionKey as BlobCustomerProvidedEncryptionKey -from azure.multiapi.storagev2.blob.v2021_06_08._models import ContainerPropertiesPaged -from azure.multiapi.storagev2.blob.v2021_06_08._generated.models import Logging as GenLogging, Metrics as GenMetrics, \ - RetentionPolicy as GenRetentionPolicy, StaticWebsite as GenStaticWebsite, CorsRule as GenCorsRule - -from ._shared.models import DictMixin -from ._shared.parser import _filetime_to_datetime, _rfc_1123_to_datetime - - -class FileSystemProperties(DictMixin): - """File System properties class. - - :ivar str name: - Name of the filesystem. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file system was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the file system. - :ivar str public_access: Specifies whether data in the file system may be accessed - publicly and the level of access. - :ivar bool has_immutability_policy: - Represents whether the file system has an immutability policy. - :ivar bool has_legal_hold: - Represents whether the file system has a legal hold. - :ivar dict metadata: A dict with name-value pairs to associate with the - file system as metadata. - :ivar bool deleted: - Whether this file system was deleted. - :ivar str deleted_version: - The version of a deleted file system. - - Returned ``FileSystemProperties`` instances expose these values through a - dictionary interface, for example: ``file_system_props["last_modified"]``. - Additionally, the file system name is available as ``file_system_props["name"]``. 
- """ - - def __init__(self): - self.name = None - self.last_modified = None - self.etag = None - self.lease = None - self.public_access = None - self.has_immutability_policy = None - self.has_legal_hold = None - self.metadata = None - self.deleted = None - self.deleted_version = None - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.deleted = generated.deleted - props.deleted_version = generated.version - props.etag = generated.properties.etag - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - generated.properties.public_access) - props.has_immutability_policy = generated.properties.has_immutability_policy - props.has_legal_hold = generated.properties.has_legal_hold - props.metadata = generated.metadata - return props - - @classmethod - def _convert_from_container_props(cls, container_properties): - container_properties.__class__ = cls - container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access - container_properties.public_access) - container_properties.lease.__class__ = LeaseProperties - return container_properties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access - - -class DirectoryProperties(DictMixin): - """ - :ivar str name: name of the directory - :ivar str etag: The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool deleted: if the current directory marked as deleted - :ivar dict metadata: Name-value pairs associated with the directory as metadata. - :ivar ~azure.storage.filedatalake.LeaseProperties lease: - Stores all the lease information for the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar ~datetime.datetime creation_time: - Indicates when the directory was created, in UTC. - :ivar int remaining_retention_days: The number of days that the directory will be retained - before being permanently deleted by the service. 
-    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
-    """
-
-    def __init__(self, **kwargs):
-        self.name = kwargs.get('name')
-        self.etag = kwargs.get('ETag')
-        self.deleted = False
-        self.metadata = kwargs.get('metadata')
-        self.lease = LeaseProperties(**kwargs)
-        self.last_modified = kwargs.get('Last-Modified')
-        self.creation_time = kwargs.get('x-ms-creation-time')
-        self.deleted_time = None
-        self.remaining_retention_days = None
-
-
-class FileProperties(DictMixin):
-    """
-    :ivar str name: name of the file
-    :ivar str etag: The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar bool deleted: if the current file is marked as deleted
-    :ivar dict metadata: Name-value pairs associated with the file as metadata.
-    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
-        Stores all the lease information for the file.
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the file was modified.
-    :ivar ~datetime.datetime creation_time:
-        Indicates when the file was created, in UTC.
-    :ivar int size: size of the file
-    :ivar int remaining_retention_days: The number of days that the file will be retained
-        before being permanently deleted by the service.
-    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
-    """
-
-    def __init__(self, **kwargs):
-        self.name = kwargs.get('name')
-        self.etag = kwargs.get('ETag')
-        self.deleted = False
-        self.metadata = kwargs.get('metadata')
-        self.lease = LeaseProperties(**kwargs)
-        self.last_modified = kwargs.get('Last-Modified')
-        self.creation_time = kwargs.get('x-ms-creation-time')
-        self.size = kwargs.get('Content-Length')
-        self.deleted_time = None
-        self.expiry_time = kwargs.get("x-ms-expiry-time")
-        self.remaining_retention_days = None
-        self.content_settings = ContentSettings(**kwargs)
-
-
-class PathProperties(DictMixin):
-    """Path properties listed by the get_paths API.
-
-    :ivar str name: The full path for a file or directory.
-    :ivar str owner: The owner of the file or directory.
-    :ivar str group: The owning group of the file or directory.
-    :ivar str permissions: Sets POSIX access permissions for the file
-        owner, the file owning group, and others. Each class may be granted
-        read, write, or execute permission. The sticky bit is also supported.
-        Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
-        supported.
-    :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified.
-    :ivar bool is_directory: Is the path a directory or not.
-    :ivar str etag: The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int content_length: The size of file if the path is a file.
-    :ivar datetime creation_time: The creation time of the file/directory.
-    :ivar datetime expiry_time: The expiry time of the file/directory.
- """ - - def __init__(self, **kwargs): - self.name = kwargs.pop('name', None) - self.owner = kwargs.get('owner', None) - self.group = kwargs.get('group', None) - self.permissions = kwargs.get('permissions', None) - self.last_modified = kwargs.get('last_modified', None) - self.is_directory = kwargs.get('is_directory', False) - self.etag = kwargs.get('etag', None) - self.content_length = kwargs.get('content_length', None) - self.creation_time = kwargs.get('creation_time', None) - self.expiry_time = kwargs.get('expiry_time', None) - - @classmethod - def _from_generated(cls, generated): - path_prop = PathProperties() - path_prop.name = generated.name - path_prop.owner = generated.owner - path_prop.group = generated.group - path_prop.permissions = generated.permissions - path_prop.last_modified = _rfc_1123_to_datetime(generated.last_modified) - path_prop.is_directory = bool(generated.is_directory) - path_prop.etag = generated.additional_properties.get('etag') - path_prop.content_length = generated.content_length - path_prop.creation_time = _filetime_to_datetime(generated.creation_time) - path_prop.expiry_time = _filetime_to_datetime(generated.expiry_time) - return path_prop - - -class LeaseProperties(BlobLeaseProperties): - """DataLake Lease Properties. - - :ivar str status: - The lease status of the file. Possible values: locked|unlocked - :ivar str state: - Lease state of the file. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file is leased, specifies whether the lease is of infinite or fixed duration. - """ - - -class ContentSettings(BlobContentSettings): - """The content settings of a file or directory. - - :ivar str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :ivar str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :ivar str content_language: - If the content_language has previously been set - for the file, that value is stored. - :ivar str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :ivar str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :ivar bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - :keyword str content_type: - The content type specified for the file or directory. If no content type was - specified, the default content type is application/octet-stream. - :keyword str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :keyword str content_language: - If the content_language has previously been set - for the file, that value is stored. - :keyword str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :keyword str cache_control: - If the cache_control has previously been set for - the file, that value is stored. 
- :keyword bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, **kwargs): - super(ContentSettings, self).__init__( - **kwargs - ) - - -class AccountSasPermissions(BlobAccountSasPermissions): - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - create=False): - super(AccountSasPermissions, self).__init__( - read=read, create=create, write=write, list=list, - delete=delete - ) - - -class FileSystemSasPermissions(object): - """FileSystemSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_system_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool write: - Create or write content, properties, metadata. Lease the file system. - :param bool delete: - Delete the file system. - :param bool list: - List paths in the file system. - :keyword bool add: - Append data to a file in the directory. - :keyword bool create: - Write a new file, snapshot a file, or copy a file to a new file. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - **kwargs): - self.read = read - self.add = kwargs.pop('add', None) - self.create = kwargs.pop('create', None) - self.write = write - self.delete = delete - self.list = list - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSystemSasPermissions from a string. - - To specify read, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A FileSystemSasPermissions object - :rtype: ~azure.storage.fildatalake.FileSystemSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, write=p_write, delete=p_delete, - list=p_list, add=p_add, create=p_create, move=p_move, - execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class DirectorySasPermissions(object): - """DirectorySasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_directory_sas` function. - - :param bool read: - Read the content, properties, metadata etc. - :param bool create: - Create a new directory - :param bool write: - Create or write content, properties, metadata. Lease the directory. - :param bool delete: - Delete the directory. - :keyword bool add: - Append data to a file in the directory. - :keyword bool list: - List any files in the directory. Implies Execute. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, create=False, write=False, - delete=False, **kwargs): - self.read = read - self.add = kwargs.pop('add', None) - self.create = create - self.write = write - self.delete = delete - self.list = kwargs.pop('list', None) - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a DirectorySasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
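# The from_string/__str__ pair defined on these permission classes round-trips the short
# permission string shown in the docstrings; the SAS helper call is a sketch assuming the
# upstream azure.storage.filedatalake helpers (account, file system and key are placeholders).
from datetime import datetime, timedelta
from azure.storage.filedatalake import FileSystemSasPermissions, generate_file_system_sas

perms = FileSystemSasPermissions.from_string("rwl")   # read + write + list
assert str(perms) == "rwl"

sas_token = generate_file_system_sas(
    account_name="myaccount",
    file_system_name="my-filesystem",
    credential="<account-key>",
    permission=perms,
    expiry=datetime.utcnow() + timedelta(hours=1),
)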
- :return: A DirectorySasPermissions object - :rtype: ~azure.storage.filedatalake.DirectorySasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, add=p_add, - list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with the - :func:`~azure.storage.filedatalake.generate_file_sas` function. - - :param bool read: - Read the content, properties, metadata etc. Use the file as - the source of a read operation. - :param bool create: - Write a new file. - :param bool write: - Create or write content, properties, metadata. Lease the file. - :param bool delete: - Delete the file. - :keyword bool add: - Append data to the file. - :keyword bool move: - Move any file in the directory to a new location. - Note the move operation can optionally be restricted to the child file or directory owner or - the parent directory owner if the saoid parameter is included in the token and the sticky bit is set - on the parent directory. - :keyword bool execute: - Get the status (system defined properties) and ACL of any file in the directory. - If the caller is the owner, set access control on any file in the directory. - :keyword bool manage_ownership: - Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory - within a folder that has the sticky bit set. - :keyword bool manage_access_control: - Allows the user to set permissions and POSIX ACLs on files and directories. - """ - - def __init__(self, read=False, create=False, write=False, delete=False, **kwargs): - self.read = read - self.add = kwargs.pop('add', None) - self.create = create - self.write = write - self.delete = delete - self.move = kwargs.pop('move', None) - self.execute = kwargs.pop('execute', None) - self.manage_ownership = kwargs.pop('manage_ownership', None) - self.manage_access_control = kwargs.pop('manage_access_control', None) - self._str = (('r' if self.read else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('m' if self.move else '') + - ('e' if self.execute else '') + - ('o' if self.manage_ownership else '') + - ('p' if self.manage_access_control else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, add, create, - write, or delete permissions. 
- :return: A FileSasPermissions object - :rtype: ~azure.storage.fildatalake.FileSasPermissions - """ - p_read = 'r' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_move = 'm' in permission - p_execute = 'e' in permission - p_manage_ownership = 'o' in permission - p_manage_access_control = 'p' in permission - - parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete, add=p_add, - move=p_move, execute=p_execute, manage_ownership=p_manage_ownership, - manage_access_control=p_manage_access_control) - return parsed - - -class AccessPolicy(BlobAccessPolicy): - """Access Policy class used by the set and get access policy methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.datalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - """ - - def __init__(self, permission=None, expiry=None, **kwargs): - super(AccessPolicy, self).__init__( - permission=permission, expiry=expiry, start=kwargs.pop('start', None) - ) - - -class ResourceTypes(BlobResourceTypes): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g.List File Systems) - :param bool file_system: - Access to file_system-level APIs (e.g., Create/Delete file system, - List Directories/Files) - :param bool object: - Access to object-level APIs for - files(e.g. Create File, etc.) - """ - - def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin - ): - super(ResourceTypes, self).__init__(service=service, container=file_system, object=object) - - -class UserDelegationKey(BlobUserDelegationKey): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - - @classmethod - def _from_generated(cls, generated): - delegation_key = cls() - delegation_key.signed_oid = generated.signed_oid - delegation_key.signed_tid = generated.signed_tid - delegation_key.signed_start = generated.signed_start - delegation_key.signed_expiry = generated.signed_expiry - delegation_key.signed_service = generated.signed_service - delegation_key.signed_version = generated.signed_version - delegation_key.value = generated.value - return delegation_key - - -class PublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """ - Specifies whether data in the file system may be accessed publicly and the level of access. - """ - - FILE = 'blob' - """ - Specifies public read access for files. file data within this file system can be read - via anonymous request, but file system data is not available. Clients cannot enumerate - files within the container via anonymous request. - """ - - FILESYSTEM = 'container' - """ - Specifies full public read access for file system and file data. Clients can enumerate - files within the file system via anonymous request, but cannot enumerate file systems - within the storage account. - """ - - @classmethod - def _from_generated(cls, public_access): - if public_access == "blob": # pylint:disable=no-else-return - return cls.File - elif public_access == "container": - return cls.FileSystem - - return None - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class DelimitedJsonDialect(BlobDelimitedJSON): - """Defines the input or output JSON serialization for a datalake query. - - :keyword str delimiter: The line separator character, default value is '\n' - """ - - -class DelimitedTextDialect(BlobDelimitedTextDialect): - """Defines the input or output delimited (CSV) serialization for a datalake query request. - - :keyword str delimiter: - Column separator, defaults to ','. 
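# Sketch of an account-level SAS built from the ResourceTypes / AccountSasPermissions
# wrappers defined above, assuming the upstream generate_account_sas helper
# (the account name and key are placeholders).
from datetime import datetime, timedelta
from azure.storage.filedatalake import (
    AccountSasPermissions, ResourceTypes, generate_account_sas)

sas_token = generate_account_sas(
    account_name="myaccount",
    account_key="<account-key>",
    resource_types=ResourceTypes(file_system=True, object=True),
    permission=AccountSasPermissions(read=True, list=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
)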
- :keyword str quotechar: - Field quote, defaults to '"'. - :keyword str lineterminator: - Record separator, defaults to '\n'. - :keyword str escapechar: - Escape char, defaults to empty. - :keyword bool has_header: - Whether the blob data includes headers in the first line. The default value is False, meaning that the - data will be returned inclusive of the first line. If set to True, the data will be returned exclusive - of the first line. - """ - - -class ArrowDialect(BlobArrowDialect): - """field of an arrow schema. - - All required parameters must be populated in order to send to Azure. - - :param str type: Required. - :keyword str name: The name of the field. - :keyword int precision: The precision of the field. - :keyword int scale: The scale of the field. - """ - - -class CustomerProvidedEncryptionKey(BlobCustomerProvidedEncryptionKey): - """ - All data in Azure Storage is encrypted at-rest using an account-level encryption key. - In versions 2021-06-08 and newer, you can manage the key used to encrypt file contents - and application metadata per-file by providing an AES-256 encryption key in requests to the storage service. - - When you use a customer-provided key, Azure Storage does not manage or persist your key. - When writing data to a file, the provided key is used to encrypt your data before writing it to disk. - A SHA-256 hash of the encryption key is written alongside the file contents, - and is used to verify that all subsequent operations against the file use the same encryption key. - This hash cannot be used to retrieve the encryption key or decrypt the contents of the file. - When reading a file, the provided key is used to decrypt your data after reading it from disk. - In both cases, the provided encryption key is securely discarded - as soon as the encryption or decryption process completes. - - :param str key_value: - Base64-encoded AES-256 encryption key value. - :param str key_hash: - Base64-encoded SHA256 of the encryption key. - :ivar str algorithm: - Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - """ - - -class QuickQueryDialect(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the quick query input/output dialect.""" - - DELIMITEDTEXT = 'DelimitedTextDialect' - DELIMITEDJSON = 'DelimitedJsonDialect' - PARQUET = 'ParquetDialect' - - -class ArrowType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - - INT64 = "int64" - BOOL = "bool" - TIMESTAMP_MS = "timestamp[ms]" - STRING = "string" - DOUBLE = "double" - DECIMAL = 'decimal' - - -class DataLakeFileQueryError(object): - """The error happened during quick query operation. - - :ivar str error: - The name of the error. - :ivar bool is_fatal: - If true, this error prevents further query processing. More result data may be returned, - but there is no guarantee that all of the original data will be processed. - If false, this error does not prevent further query processing. - :ivar str description: - A description of the error. - :ivar int position: - The blob offset at which the error occurred. - """ - - def __init__(self, error=None, is_fatal=False, description=None, position=None): - self.error = error - self.is_fatal = is_fatal - self.description = description - self.position = position - - -class AccessControlChangeCounters(DictMixin): - """ - AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively. 
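# Hedged sketch of how the quick-query dialects above are used, assuming the upstream
# DataLakeFileClient.query_file API (the CSV path, query text and credential are hypothetical).
from azure.storage.filedatalake import DataLakeFileClient, DelimitedTextDialect

file_client = DataLakeFileClient(
    "https://myaccount.dfs.core.windows.net", "my-filesystem", "raw/ratings.csv",
    credential="<account-key>")

input_format = DelimitedTextDialect(delimiter=",", quotechar='"', has_header=True)
reader = file_client.query_file(
    "SELECT * from DataLakeStorage WHERE rating > 3",
    file_format=input_format)
print(reader.readall())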
- - :ivar int directories_successful: - Number of directories where Access Control List has been updated successfully. - :ivar int files_successful: - Number of files where Access Control List has been updated successfully. - :ivar int failure_count: - Number of paths where Access Control List update has failed. - """ - - def __init__(self, directories_successful, files_successful, failure_count): - self.directories_successful = directories_successful - self.files_successful = files_successful - self.failure_count = failure_count - - -class AccessControlChangeResult(DictMixin): - """ - AccessControlChangeResult contains result of operations that change Access Control Lists recursively. - - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters: - Contains counts of paths changed from start of the operation. - :ivar str continuation: - Optional continuation token. - Value is present when operation is split into multiple batches and can be used to resume progress. - """ - - def __init__(self, counters, continuation): - self.counters = counters - self.continuation = continuation - - -class AccessControlChangeFailure(DictMixin): - """ - Represents an entry that failed to update Access Control List. - - :ivar str name: - Name of the entry. - :ivar bool is_directory: - Indicates whether the entry is a directory. - :ivar str error_message: - Indicates the reason why the entry failed to update. - """ - - def __init__(self, name, is_directory, error_message): - self.name = name - self.is_directory = is_directory - self.error_message = error_message - - -class AccessControlChanges(DictMixin): - """ - AccessControlChanges contains batch and cumulative counts of operations - that change Access Control Lists recursively. - Additionally it exposes path entries that failed to update while these operations progress. - - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters: - Contains counts of paths changed within single batch. - :ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters: - Contains counts of paths changed from start of the operation. - :ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures: - List of path entries that failed to update Access Control List within single batch. - :ivar str continuation: - An opaque continuation token that may be used to resume the operations in case of failures. - """ - - def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation): - self.batch_counters = batch_counters - self.aggregate_counters = aggregate_counters - self.batch_failures = batch_failures - self.continuation = continuation - - -class DeletedPathProperties(DictMixin): - """ - Properties populated for a deleted path. - - :ivar str name: - The name of the file in the path. - :ivar ~datetime.datetime deleted_time: - A datetime object representing the time at which the path was deleted. - :ivar int remaining_retention_days: - The number of days that the path will be retained before being permanently deleted by the service. - :ivar str deletion_id: - The id associated with the deleted path. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.deleted_time = None - self.remaining_retention_days = None - self.deletion_id = None - - -class AnalyticsLogging(GenLogging): - """Azure Analytics Logging settings. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. 
- :keyword bool delete: - Indicates whether all delete requests should be logged. The default value is `False`. - :keyword bool read: - Indicates whether all read requests should be logged. The default value is `False`. - :keyword bool write: - Indicates whether all write requests should be logged. The default value is `False`. - :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.delete = kwargs.get('delete', False) - self.read = kwargs.get('read', False) - self.write = kwargs.get('write', False) - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - delete=generated.delete, - read=generated.read, - write=generated.write, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class Metrics(GenMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates. - - :keyword str version: - The version of Storage Analytics to configure. The default value is 1.0. - :keyword bool enabled: - Indicates whether metrics are enabled for the Datalake service. - The default value is `False`. - :keyword bool include_apis: - Indicates whether metrics should generate summary statistics for called API operations. - :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy: - Determines how long the associated data should persist. If not specified the retention - policy will be disabled by default. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GenRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - :param bool enabled: - Indicates whether a retention policy is enabled for the storage service. - The default value is False. - :param int days: - Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. If enabled=True, the number of days must be specified. - """ - - def __init__(self, enabled=False, days=None): - super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class StaticWebsite(GenStaticWebsite): - """The properties that enable an account to host a static website. - - :keyword bool enabled: - Indicates whether this account is hosting a static website. - The default value is `False`. 
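# Behaviour sketch grounded in the RetentionPolicy constructor above: enabling the policy
# without a day count is rejected. AnalyticsLogging and RetentionPolicy are assumed to be
# re-exported by the upstream azure.storage.filedatalake package as they are from this module.
from azure.storage.filedatalake import AnalyticsLogging, RetentionPolicy

retention = RetentionPolicy(enabled=True, days=7)    # keep analytics data for a week
logging_settings = AnalyticsLogging(read=True, write=True, delete=True, retention_policy=retention)

try:
    RetentionPolicy(enabled=True)                    # no 'days' -> ValueError
except ValueError as error:
    print(error)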
- :keyword str index_document: - The default name of the index page under each directory. - :keyword str error_document404_path: - The absolute path of the custom 404 page. - :keyword str default_index_document_path: - Absolute path of the default index page. - """ - - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled', False) - if self.enabled: - self.index_document = kwargs.get('index_document') - self.error_document404_path = kwargs.get('error_document404_path') - self.default_index_document_path = kwargs.get('default_index_document_path') - else: - self.index_document = None - self.error_document404_path = None - self.default_index_document_path = None - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - index_document=generated.index_document, - error_document404_path=generated.error_document404_path, - default_index_document_path=generated.default_index_document_path - ) - - -class CorsRule(GenCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_path_client.py deleted file mode 100644 index 419c8c4..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_path_client.py +++ /dev/null @@ -1,949 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. 
All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from datetime import datetime -from typing import ( # pylint: disable=unused-import - Any, Dict, Optional, Union, - TYPE_CHECKING) - -try: - from urllib.parse import urlparse, quote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.exceptions import AzureError, HttpResponseError -from azure.multiapi.storagev2.blob.v2021_06_08 import BlobClient -from ._data_lake_lease import DataLakeLeaseClient -from ._deserialize import process_storage_error -from ._generated import AzureDataLakeStorageRESTAPI -from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \ - AccessControlChangeCounters, AccessControlChangeFailure -from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \ - get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions, \ - get_api_version, get_cpk_info -from ._shared.base_client import StorageAccountHostsMixin, parse_query -from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized - -if TYPE_CHECKING: - from ._models import ContentSettings - from ._models import FileProperties - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(StorageAccountHostsMixin): - """A base client for interacting with a DataLake file/directory, even if the file/directory may not - yet exist. - - :param str account_url: - The URI to the storage account. - :param str file_system_name: - The file system for the directory or files. - :param str file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - - # remove the preceding/trailing delimiter from the path components - file_system_name = file_system_name.strip('/') - - # the name of root directory is / - if path_name != '/': - path_name = path_name.strip('/') - - if not (file_system_name and path_name): - raise ValueError("Please specify a file system name and file path.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - - blob_account_url = convert_dfs_url_to_blob_url(account_url) - self._blob_account_url = blob_account_url - - datalake_hosts = kwargs.pop('_hosts', None) - blob_hosts = None - if datalake_hosts: - blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) - blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} - self._blob_client = BlobClient(blob_account_url, file_system_name, path_name, - credential=credential, _hosts=blob_hosts, **kwargs) - - _, sas_token = parse_query(parsed_url.query) - self.file_system_name = file_system_name - self.path_name = path_name - - self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) - - super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, - _hosts=datalake_hosts, **kwargs) - # ADLS doesn't support secondary endpoint, make sure it's empty - self._hosts[LocationMode.SECONDARY] = "" - api_version = get_api_version(kwargs) - - self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, file_system=file_system_name, - path=path_name, pipeline=self._pipeline) - self._client._config.version = api_version # pylint: disable=protected-access - - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI( - self._blob_client.url, - base_url=self._blob_client.url, - file_system=file_system_name, - path=path_name, - pipeline=self._pipeline) - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - def __exit__(self, *args): - self._blob_client.close() - super(PathClient, self).__exit__(*args) - - def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._blob_client.close() - self.__exit__() - - def _format_url(self, hostname): - file_system_name = self.file_system_name - if isinstance(file_system_name, six.text_type): - file_system_name = file_system_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(file_system_name), - quote(self.path_name, safe='~'), - self._query_str) - - def _create_path_options(self, resource_type, - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - cpk_info = get_cpk_info(self.scheme, kwargs) - - options = { - 'resource': resource_type, - 'properties': add_metadata_headers(metadata), - 'permissions': kwargs.pop('permissions', None), - 'umask': kwargs.pop('umask', None), - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cpk_info': cpk_info, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. 
- :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _delete_path_options(**kwargs): - # type: (**Any) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'cls': return_response_headers, - 'timeout': kwargs.pop('timeout', None)} - options.update(kwargs) - return options - - def _delete(self, **kwargs): - # type: (**Any) -> Dict[Union[datetime, str]] - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :param ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :param ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :param int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return self._client.path.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs): - # type: (...) 
-> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'owner': owner, - 'group': group, - 'permissions': permissions, - 'acl': acl, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. - - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). 
- """ - if not any([owner, group, permissions, acl]): - raise ValueError("At least one parameter should be set for set_access_control API") - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return self._client.path.set_access_control(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _get_access_control_options(upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - mod_conditions = get_mod_conditions(kwargs) - - options = { - 'action': 'getAccessControl', - 'upn': upn if upn else False, - 'lease_access_conditions': access_conditions, - 'modified_access_conditions': mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - :param upn: Optional. - Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. 
- """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return self._client.path.get_properties(**options) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _set_access_control_recursive_options(mode, acl, **kwargs): - # type: (str, str, **Any) -> Dict[str, Any] - - options = { - 'mode': mode, - 'force_flag': kwargs.pop('continue_on_failure', None), - 'timeout': kwargs.pop('timeout', None), - 'continuation': kwargs.pop('continuation_token', None), - 'max_records': kwargs.pop('batch_size', None), - 'acl': acl, - 'cls': return_headers_and_deserialized} - options.update(kwargs) - return options - - def set_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Sets the Access Control on a path and sub-paths. - - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def update_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Modifies the Access Control on a path and sub-paths. 
- - :param acl: - Modifies POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def remove_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Removes the Access Control on a path and sub-paths. - - :param acl: - Removes POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, and a user or - group identifier in the format "[scope:][type]:[id]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. 
- The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed then, - continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) - return self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - def _set_access_control_internal(self, options, progress_hook, max_batches=None): - try: - continue_on_failure = options.get('force_flag') - total_directories_successful = 0 - total_files_success = 0 - total_failure_count = 0 - batch_count = 0 - last_continuation_token = None - current_continuation_token = None - continue_operation = True - while continue_operation: - headers, resp = self._client.path.set_access_control_recursive(**options) - - # make a running tally so that we can report the final results - total_directories_successful += resp.directories_successful - total_files_success += resp.files_successful - total_failure_count += resp.failure_count - batch_count += 1 - current_continuation_token = headers['continuation'] - - if current_continuation_token is not None: - last_continuation_token = current_continuation_token - - if progress_hook is not None: - progress_hook(AccessControlChanges( - batch_counters=AccessControlChangeCounters( - directories_successful=resp.directories_successful, - files_successful=resp.files_successful, - failure_count=resp.failure_count, - ), - aggregate_counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count, - ), - batch_failures=[AccessControlChangeFailure( - name=failure.name, - is_directory=failure.type == 'DIRECTORY', - error_message=failure.error_message) for failure in resp.failed_entries], - continuation=last_continuation_token)) - - # update the continuation token, if there are more operations that cannot be completed in a single call - max_batches_satisfied = (max_batches is not None and batch_count == max_batches) - continue_operation = bool(current_continuation_token) and not max_batches_satisfied - options['continuation'] = current_continuation_token - - # currently the service stops on 
any failure, so we should send back the last continuation token - # for the user to retry the failed updates - # otherwise we should just return what the service gave us - return AccessControlChangeResult(counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count), - continuation=last_continuation_token - if total_failure_count > 0 and not continue_on_failure else current_continuation_token) - except HttpResponseError as error: - error.continuation_token = last_continuation_token - process_storage_error(error) - except AzureError as error: - error.continuation_token = last_continuation_token - raise error - - def _rename_path_options(self, rename_source, - content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) - if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None): - raise ValueError("metadata, permissions, umask is not supported for this operation") - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - source_lease_id = get_lease_id(kwargs.pop('source_lease', None)) - mod_conditions = get_mod_conditions(kwargs) - source_mod_conditions = get_source_mod_conditions(kwargs) - - path_http_headers = None - if content_settings: - path_http_headers = get_path_http_headers(content_settings) - - options = { - 'rename_source': rename_source, - 'path_http_headers': path_http_headers, - 'lease_access_conditions': access_conditions, - 'source_lease_id': source_lease_id, - 'modified_access_conditions': mod_conditions, - 'source_modified_access_conditions': source_mod_conditions, - 'timeout': kwargs.pop('timeout', None), - 'mode': 'legacy', - 'cls': return_response_headers} - options.update(kwargs) - return options - - def _rename_path(self, rename_source, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: - The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. 
- Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. 
- Use of customer-provided keys must be done over HTTPS. - Required if the file/directory was created with a customer-provided key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../tests/test_blob_samples_common.py - :start-after: [START get_blob_properties] - :end-before: [END get_blob_properties] - :language: python - :dedent: 8 - :caption: Getting the properties for a file/directory. - """ - path_properties = self._blob_client.get_blob_properties(**kwargs) - return path_properties - - def _exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a path exists and returns False otherwise. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return self._blob_client.exists(**kwargs) - - def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: Dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - """ - return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. 
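To make the "replaces all existing metadata" and "overrides all content settings" behaviour described here concrete, a hedged sketch with a placeholder file client follows:

    from azure.storage.filedatalake import DataLakeFileClient, ContentSettings

    file_client = DataLakeFileClient(
        "https://<account>.dfs.core.windows.net",
        "my-filesystem", "dir/data.csv", credential="<account-key>",
    )

    # Replaces ALL metadata currently on the path with this dict.
    file_client.set_metadata({"category": "test", "owner_team": "data"})

    # Setting any one field replaces the whole set of content settings,
    # so populate everything you want to keep.
    file_client.set_http_headers(content_settings=ContentSettings(
        content_type="text/csv",
        cache_control="no-cache",
    ))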
- :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
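A hedged sketch of the lease workflow described above, with a placeholder file client; finite leases must be between 15 and 60 seconds, and -1 requests an infinite lease:

    from azure.storage.filedatalake import DataLakeFileClient

    file_client = DataLakeFileClient(
        "https://<account>.dfs.core.windows.net",
        "my-filesystem", "dir/data.csv", credential="<account-key>",
    )

    lease = file_client.acquire_lease(lease_duration=60)
    try:
        # Pass the lease (or its ID) to calls that require it while it is held.
        file_client.set_metadata({"category": "test"}, lease=lease)
    finally:
        lease.release()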
- :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_quick_query_helper.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_quick_query_helper.py deleted file mode 100644 index ff67d27..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_quick_query_helper.py +++ /dev/null @@ -1,71 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import Union, Iterable, IO # pylint: disable=unused-import - - -class DataLakeFileQueryReader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to read query results. - - :ivar str name: - The name of the blob being quered. - :ivar str container: - The name of the container where the blob is. - :ivar dict response_headers: - The response_headers of the quick query request. - :ivar bytes record_delimiter: - The delimiter used to separate lines, or records with the data. The `records` - method will return these lines via a generator. - """ - - def __init__( - self, - blob_query_reader - ): - self.name = blob_query_reader.name - self.file_system = blob_query_reader.container - self.response_headers = blob_query_reader.response_headers - self.record_delimiter = blob_query_reader.record_delimiter - self._bytes_processed = 0 - self._blob_query_reader = blob_query_reader - - def __len__(self): - return len(self._blob_query_reader) - - def readall(self): - # type: () -> Union[bytes, str] - """Return all query results. - - This operation is blocking until all data is downloaded. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Union[bytes, str] - """ - return self._blob_query_reader.readall() - - def readinto(self, stream): - # type: (IO) -> None - """Download the query result to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. - :returns: None - """ - self._blob_query_reader(stream) - - def records(self): - # type: () -> Iterable[Union[bytes, str]] - """Returns a record generator for the query result. - - Records will be returned line by line. - If encoding has been configured - this will be used to decode individual - records are they are received. - - :rtype: Iterable[Union[bytes, str]] - """ - return self._blob_query_reader.records() diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_serialize.py deleted file mode 100644 index 2e6105e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_serialize.py +++ /dev/null @@ -1,133 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
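For context on how the query reader removed above is consumed, here is a hedged sketch built on the public query_file API that returns it; the client details and the query text are illustrative only:

    from azure.storage.filedatalake import DataLakeFileClient

    file_client = DataLakeFileClient(
        "https://<account>.dfs.core.windows.net",
        "my-filesystem", "dir/data.csv", credential="<account-key>",
    )

    # The query text is illustrative; quick query uses a SQL-like dialect.
    reader = file_client.query_file("SELECT * from DataLakeStorage")

    # Iterate record by record (records are split on reader.record_delimiter)...
    for record in reader.records():
        print(record)

    # ...or, on a fresh reader, pull everything with reader.readall(),
    # or stream into a writable object with reader.readinto(stream).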
-# -------------------------------------------------------------------------- - -from typing import Any, Dict, Optional, Union - -from azure.multiapi.storagev2.blob.v2021_06_08._serialize import _get_match_headers # pylint: disable=protected-access -from ._shared import encode_base64 -from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \ - SourceModifiedAccessConditions, LeaseAccessConditions, CpkInfo - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08', - '2020-06-12', - '2020-08-04', - '2020-10-02', - '2021-02-12', - '2021-04-10', - '2021-06-08' -] - - -def get_api_version(kwargs): - # type: (Dict[str, Any]) -> str - api_version = kwargs.get('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or _SUPPORTED_API_VERSIONS[-1] - - -def convert_dfs_url_to_blob_url(dfs_account_url): - return dfs_account_url.replace('.dfs.', '.blob.', 1) - - -def convert_datetime_to_rfc1123(date): - weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()] - month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", - "Oct", "Nov", "Dec"][date.month - 1] - return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, date.day, month, - date.year, date.hour, date.minute, date.second) - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> str - if not metadata: - return None - headers = list() - if metadata: - for key, value in metadata.items(): - headers.append(key + '=') - headers.append(encode_base64(value)) - headers.append(',') - - if headers: - del headers[-1] - - return ''.join(headers) - - -def get_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> ModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') - return ModifiedAccessConditions( - if_modified_since=kwargs.pop('if_modified_since', None), - if_unmodified_since=kwargs.pop('if_unmodified_since', None), - if_match=if_match or kwargs.pop('if_match', None), - if_none_match=if_none_match or kwargs.pop('if_none_match', None) - ) - - -def get_source_mod_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_path_http_headers(content_settings): - path_headers = PathHTTPHeaders( - cache_control=content_settings.cache_control, - content_type=content_settings.content_type, - content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - content_encoding=content_settings.content_encoding, - content_language=content_settings.content_language, - content_disposition=content_settings.content_disposition - ) - return path_headers - - -def get_access_conditions(lease): - # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore 
- return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_lease_id(lease): - if not lease: - return "" - try: - lease_id = lease.id - except AttributeError: - lease_id = lease - return lease_id - - -def get_cpk_info(scheme, kwargs): - # type: (str, Dict[str, Any]) -> CpkInfo - cpk = kwargs.pop('cpk', None) - if cpk: - if scheme.lower() != 'https': - raise ValueError("Customer provided encryption key must be used over HTTPS.") - return CpkInfo( - encryption_key=cpk.key_value, - encryption_key_sha256=cpk.key_hash, - encryption_algorithm=cpk.algorithm) - - return None diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . 
import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. - """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 
'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/base_client.py deleted file mode 100644 index f8fae9e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/base_client.py +++ /dev/null @@ -1,462 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
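To make the secondary-host convention above concrete, here is a small standalone sketch (not the SDK's API) that derives the "-secondary" hostname the same way this mixin does; the account URL is a placeholder:

    from urllib.parse import urlparse

    def derive_hosts(account_url: str) -> dict:
        """Mirror the convention above: '<account>-secondary.<service>.core.windows.net'."""
        parsed = urlparse(account_url)
        account = parsed.netloc.split(".", 1)[0]
        primary = (parsed.netloc + parsed.path).rstrip("/")
        secondary = parsed.netloc.replace(account, account + "-secondary", 1)
        return {"primary": primary, "secondary": secondary}

    print(derive_hosts("https://myaccount.dfs.core.windows.net"))
    # {'primary': 'myaccount.dfs.core.windows.net',
    #  'secondary': 'myaccount-secondary.dfs.core.windows.net'}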
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
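The pipeline construction above accepts several credential shapes. The following is a simplified standalone sketch of that dispatch, purely for illustration; the policy names are those used in this module and the behaviour for plain strings is abbreviated:

    from azure.core.credentials import AzureSasCredential

    def pick_credential_policy(credential):
        """Simplified mirror of the credential dispatch above (illustration only)."""
        if credential is None:
            return "anonymous, or SAS already embedded in the URL"
        if hasattr(credential, "get_token"):
            return "BearerTokenCredentialPolicy (OAuth token credential)"
        if isinstance(credential, AzureSasCredential):
            return "AzureSasCredentialPolicy"
        if isinstance(credential, dict) and {"account_name", "account_key"} <= set(credential):
            return "SharedKeyCredentialPolicy"
        if isinstance(credential, str):
            return "treated as a shared key or SAS token, depending on context"
        raise TypeError("Unsupported credential: {!r}".format(credential))

    print(pick_credential_policy({"account_name": "acct", "account_key": "key"}))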
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - if service == "dfs": - primary = primary.replace(".blob.", ".dfs.") - secondary = secondary.replace(".blob.", ".dfs.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - 
config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/base_client_async.py deleted file mode 100644 index 091c350..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/base_client_async.py +++ /dev/null @@ -1,183 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
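A simplified standalone sketch of the query-string handling just above: pick SAS parameters and any snapshot value out of a URL query. The parameter set here is a short illustrative subset of QueryStringConstants, not the full list:

    from urllib.parse import parse_qs, quote

    # Illustrative subset of SAS query parameters; the real list comes
    # from QueryStringConstants.to_list().
    SAS_PARAMS = {"sv", "ss", "srt", "sp", "se", "st", "sig", "spr"}

    def split_query(query_str: str):
        parsed = {k: v[0] for k, v in parse_qs(query_str.lstrip("?")).items()}
        sas = "&".join(
            "{}={}".format(k, quote(v, safe="")) for k, v in parsed.items() if k in SAS_PARAMS
        ) or None
        snapshot = parsed.get("snapshot") or parsed.get("sharesnapshot")
        return snapshot, sas

    print(split_query("?snapshot=2020-01-01&sv=2021-06-08&sig=abc%3D"))
    # ('2020-01-01', 'sv=2021-06-08&sig=abc%3D')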
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/constants.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/constants.py deleted file mode 100644 index ea467c5..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/constants.py +++ /dev/null @@ -1,18 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from .._serialize import _SUPPORTED_API_VERSIONS - - -X_MS_VERSION = _SUPPORTED_API_VERSIONS[-1] - -# Default socket timeouts, in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 2000 # 100MB (max block size) / 50KB/s (an arbitrarily chosen minimum upload speed) - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. 
- ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
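# (The algorithm string handed to unwrap_key below is whatever the KEK's
#  get_key_wrap_algorithm() reported when the metadata was generated, so the same
#  key-encryption-key object round-trips wrap_key/unwrap_key unchanged.)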
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
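# (CBC processes whole 16-byte blocks, so the PKCS7 padder above always adds
#  padding -- a full extra block for already-aligned input -- and the decrypt path
#  strips it with the matching unpadder.)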
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
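# The serialized queue payload built below therefore has this shape
# (values abbreviated, binary fields base64-encoded):
# {
#   "EncryptedMessageContents": "<ciphertext>",
#   "EncryptionData": {
#     "WrappedContentKey": {"KeyId": "...", "EncryptedKey": "...", "Algorithm": "..."},
#     "EncryptionAgent": {"Protocol": "1.0", "EncryptionAlgorithm": "AES_CBC_256"},
#     "ContentEncryptionIV": "...",
#     "KeyWrappingMetadata": {"EncryptionLibrary": "Python <version>"}
#   }
# }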
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/models.py deleted file mode 100644 index 71da954..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/models.py +++ /dev/null @@ -1,483 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
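# The key_encryption_key objects used throughout the encryption module above are
# duck-typed; _validate_key_encryption_key_wrap only checks for wrap_key, get_kid
# and get_key_wrap_algorithm. A minimal local stand-in could look like the
# hypothetical sketch below (AES key wrap via the 'cryptography' package; the
# class name and the "A256KW" label are illustrative, not from the removed code).
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap


class LocalKeyWrapper(object):
    """Toy key-encryption-key backed by a local 256-bit wrapping key."""

    def __init__(self, kid, wrapping_key):
        self._kid = kid            # opaque key identifier string
        self._key = wrapping_key   # 32 random bytes, e.g. os.urandom(32)

    def wrap_key(self, cek):
        return aes_key_wrap(self._key, cek, default_backend())

    def unwrap_key(self, wrapped, algorithm):
        # 'algorithm' echoes get_key_wrap_algorithm(); only one scheme here.
        return aes_key_unwrap(self._key, wrapped, default_backend())

    def get_key_wrap_algorithm(self):
        return "A256KW"

    def get_kid(self):
        return self._kid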
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - -from azure.core import CaseInsensitiveEnumMeta - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - - # Generic storage values - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = "InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = "OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - - # Blob values - APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" - BLOB_ALREADY_EXISTS = "BlobAlreadyExists" - BLOB_NOT_FOUND = "BlobNotFound" - BLOB_OVERWRITTEN = "BlobOverwritten" - BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" - BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" - BLOCK_LIST_TOO_LONG = "BlockListTooLong" - CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" - CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" - CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" - CONTAINER_BEING_DELETED = "ContainerBeingDeleted" - CONTAINER_DISABLED = "ContainerDisabled" - CONTAINER_NOT_FOUND = "ContainerNotFound" - CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" - COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" - COPY_ID_MISMATCH = "CopyIdMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" - INCREMENTAL_COPY_BLOB_MISMATCH = 
"IncrementalCopyBlobMismatch" - INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" - INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" - INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" - INVALID_BLOB_TIER = "InvalidBlobTier" - INVALID_BLOB_TYPE = "InvalidBlobType" - INVALID_BLOCK_ID = "InvalidBlockId" - INVALID_BLOCK_LIST = "InvalidBlockList" - INVALID_OPERATION = "InvalidOperation" - INVALID_PAGE_RANGE = "InvalidPageRange" - INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" - INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" - INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" - LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" - LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" - LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" - LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" - LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" - LEASE_ID_MISSING = "LeaseIdMissing" - LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" - LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" - LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" - LEASE_LOST = "LeaseLost" - LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" - LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" - LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" - MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" - NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" - OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" - PENDING_COPY_OPERATION = "PendingCopyOperation" - PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" - PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" - PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" - SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" - SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" - SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" - SNAPHOT_OPERATION_RATE_EXCEEDED = "SnaphotOperationRateExceeded" - SNAPSHOTS_PRESENT = "SnapshotsPresent" - SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" - SYSTEM_IN_USE = "SystemInUse" - TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" - UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" - BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" - BLOB_ARCHIVED = "BlobArchived" - BLOB_NOT_ARCHIVED = "BlobNotArchived" - - # Queue values - INVALID_MARKER = "InvalidMarker" - MESSAGE_NOT_FOUND = "MessageNotFound" - MESSAGE_TOO_LARGE = "MessageTooLarge" - POP_RECEIPT_MISMATCH = "PopReceiptMismatch" - QUEUE_ALREADY_EXISTS = "QueueAlreadyExists" - QUEUE_BEING_DELETED = "QueueBeingDeleted" - QUEUE_DISABLED = "QueueDisabled" - QUEUE_NOT_EMPTY = "QueueNotEmpty" - QUEUE_NOT_FOUND = "QueueNotFound" - - # File values - CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" - CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" - DELETE_PENDING = "DeletePending" - DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" - FILE_LOCK_CONFLICT = "FileLockConflict" - INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" - PARENT_NOT_FOUND = "ParentNotFound" - READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" - 
SHARE_ALREADY_EXISTS = "ShareAlreadyExists" - SHARE_BEING_DELETED = "ShareBeingDeleted" - SHARE_DISABLED = "ShareDisabled" - SHARE_NOT_FOUND = "ShareNotFound" - SHARING_VIOLATION = "SharingViolation" - SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" - SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" - SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" - SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" - CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - CONTENT_LENGTH_MUST_BE_ZERO = 'ContentLengthMustBeZero' - PATH_ALREADY_EXISTS = 'PathAlreadyExists' - INVALID_FLUSH_POSITION = 'InvalidFlushPosition' - INVALID_PROPERTY_NAME = 'InvalidPropertyName' - INVALID_SOURCE_URI = 'InvalidSourceUri' - UNSUPPORTED_REST_VERSION = 'UnsupportedRestVersion' - FILE_SYSTEM_NOT_FOUND = 'FilesystemNotFound' - PATH_NOT_FOUND = 'PathNotFound' - RENAME_DESTINATION_PARENT_PATH_NOT_FOUND = 'RenameDestinationParentPathNotFound' - SOURCE_PATH_NOT_FOUND = 'SourcePathNotFound' - DESTINATION_PATH_IS_BEING_DELETED = 'DestinationPathIsBeingDeleted' - FILE_SYSTEM_ALREADY_EXISTS = 'FilesystemAlreadyExists' - FILE_SYSTEM_BEING_DELETED = 'FilesystemBeingDeleted' - INVALID_DESTINATION_PATH = 'InvalidDestinationPath' - INVALID_RENAME_SOURCE_PATH = 'InvalidRenameSourcePath' - INVALID_SOURCE_OR_DESTINATION_RESOURCE_TYPE = 'InvalidSourceOrDestinationResourceType' - LEASE_IS_ALREADY_BROKEN = 'LeaseIsAlreadyBroken' - LEASE_NAME_MISMATCH = 'LeaseNameMismatch' - PATH_CONFLICT = 'PathConflict' - SOURCE_PATH_IS_BEING_DELETED = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
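The flags serialize to a compact string via str(): for example,
ResourceTypes(service=True, container=True) renders as 'sc', and
ResourceTypes.from_string('sc') parses it back.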
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. - :keyword bool permanent_delete: - To enable permanent delete on the blob is permitted. - Valid for Object resource type of Blob only. 
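For example, AccountSasPermissions.from_string('rwdl') enables read, write,
delete and list, and str() of that object round-trips to 'rwdl'.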
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.permanent_delete = kwargs.pop('permanent_delete', False) - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('y' if self.permanent_delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') + - ('i' if self.set_immutability_policy else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.filedatalake.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_permanent_delete = 'y' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - p_set_immutability_policy = 'i' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy, - permanent_delete=p_permanent_delete) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. 
- :return: A Services object - :rtype: ~azure.storage.blob.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/parser.py deleted file mode 100644 index a4f9da9..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/parser.py +++ /dev/null @@ -1,52 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -from datetime import datetime, timezone - -EPOCH_AS_FILETIME = 116444736000000000 # January 1, 1970 as MS filetime -HUNDREDS_OF_NANOSECONDS = 10000000 - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') - -def _rfc_1123_to_datetime(rfc_1123: str) -> datetime: - """Converts an RFC 1123 date string to a UTC datetime. - """ - if not rfc_1123: - return None - - return datetime.strptime(rfc_1123, "%a, %d %b %Y %H:%M:%S %Z") - -def _filetime_to_datetime(filetime: str) -> datetime: - """Converts an MS filetime string to a UTC datetime. "0" indicates None. - If parsing MS Filetime fails, tries RFC 1123 as backup. - """ - if not filetime: - return None - - # Try to convert to MS Filetime - try: - filetime = int(filetime) - if filetime == 0: - return None - - return datetime.fromtimestamp((filetime - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc) - except ValueError: - pass - - # Try RFC 1123 as backup - return _rfc_1123_to_datetime(filetime) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/policies.py deleted file mode 100644 index 1c77692..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/policies.py +++ /dev/null @@ -1,622 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. 
All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): # pylint: disable=too-many-return-statements - """Is this method/status code retryable? (Based on allowlists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
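# (Azure Storage throttling typically surfaces here as 503 ServerBusy or
#  500 OperationTimedOut, so 5xx responses other than 501/505 are retried.)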
- if status in [501, 505]: - return False - return True - # retry if invalid content md5 - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = response.http_request.headers.get('content-md5', None) or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - def __init__(self, logging_enable=False, **kwargs): - self.logging_body = kwargs.pop("logging_body", False) - super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs) - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - self.logging_body = self.logging_body or options.pop("logging_body", False) - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - if self.logging_body: - _LOGGER.debug(str(http_request.body)) - else: - # We don't want to log the binary data of a file upload. - _LOGGER.debug("Hidden body, please use logging_body to show body") - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
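# (Attachment downloads are logged by filename only, octet-stream and image
#  bodies are summarised, and text bodies are printed only when logging_body is
#  enabled -- see the content-type checks below.)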
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - resp_content_type = response.http_response.headers.get("content-type", "") - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif resp_content_type.endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif resp_content_type.startswith("image"): - _LOGGER.debug("Body contains image data.") - - if self.logging_body and resp_content_type.startswith("text"): - _LOGGER.debug(response.http_response.text()) - elif self.logging_body: - try: - _LOGGER.debug(response.http_response.body()) - except ValueError: - _LOGGER.debug("Body is streamable") - - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
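Concretely, the transactional MD5 check implemented by ``StorageContentValidation`` above boils down to comparing a locally computed, base64-encoded MD5 digest with the service's ``content-md5`` header. A rough sketch::

    import base64
    import hashlib

    def content_md5(data):
        # Base64-encoded MD5 digest, as placed in the Content-MD5 request header
        # and compared against the 'content-md5' response header on download.
        return base64.b64encode(hashlib.md5(data).digest()).decode('utf-8')

    assert content_md5(b'hello world') == 'XrY7u+Ae7tCTyyK7j1rNww=='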
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
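For the secondary-host behaviour referenced above, ``_set_next_host_location`` simply flips the location mode and rewrites the URL's netloc. An isolated sketch, using plain strings in place of ``LocationMode`` and example host names::

    from urllib.parse import urlparse

    def switch_host(url, hosts, current_mode):
        # Flip between the primary and secondary endpoints and rewrite the
        # request URL accordingly, as the retry policy does for read requests
        # when retry_to_secondary is enabled.
        next_mode = 'secondary' if current_mode == 'primary' else 'primary'
        updated = urlparse(url)._replace(netloc=hosts[next_mode])
        return updated.geturl(), next_mode

    hosts = {
        'primary': 'account.blob.core.windows.net',
        'secondary': 'account-secondary.blob.core.windows.net',
    }
    url, mode = switch_host(
        'https://account.blob.core.windows.net/container/blob', hosts, 'primary')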
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the allowlist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. 
- - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
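The back-off schedule documented for ``ExponentialRetry`` above (``initial_backoff`` plus ``increment_base`` raised to the retry count, jittered by ``random_jitter_range``) is easy to reproduce in isolation. A minimal sketch with the default values::

    import random

    def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
        # Retry 0 waits initial_backoff seconds; retry N adds increment_base ** N,
        # and the result is jittered within +/- jitter seconds.
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        start = backoff - jitter if backoff > jitter else 0
        return random.Random().uniform(start, backoff + jitter)

    # Roughly 15s, 18s, 24s and 42s (each +/- 3s of jitter) for retries 0-3.
    delays = [exponential_backoff(count) for count in range(4)]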
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/request_handlers.py deleted file mode 100644 index 325825c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/request_handlers.py +++ /dev/null @@ -1,273 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
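The ``serialize_iso`` helper whose removal starts here formats datetimes as UTC ISO-8601 strings for request headers. The same formatting can be sketched with the standard library alone (the name ``to_iso8601_utc`` is illustrative; the real helper only needs ``isodate`` for parsing string input)::

    from datetime import datetime, timezone

    def to_iso8601_utc(value):
        # Normalise to UTC and emit 'YYYY-MM-DDTHH:MM:SSZ', as serialize_iso does.
        utc = value.utctimetuple()
        return "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}Z".format(
            utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec)

    assert to_iso8601_utc(datetime(2023, 1, 2, 3, 4, 5, tzinfo=timezone.utc)) == '2023-01-02T03:04:05Z'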
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, OSError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
:
<header key>: <header value>
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/response_handlers.py deleted file mode 100644 index 5c1558c..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/response_handlers.py +++ /dev/null @@ -1,196 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. 
- if isinstance(storage_error, (PartialBatchErrorException, - ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - try: - error_body = error_body or storage_error.response.reason - except AttributeError: - error_body = '' - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error - if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise 
error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - 
QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/uploads.py deleted file mode 100644 index 1672177..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/uploads.py +++ /dev/null @@ -1,619 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
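The account SAS produced by ``get_token`` above is simply the collected query parameters, URL-quoted and joined with ``&``. A standalone sketch (the parameter values shown are placeholders, and ``build_sas_token`` is a hypothetical name)::

    from urllib.parse import quote

    def build_sas_token(query):
        # Join the non-empty SAS query parameters into the final token string,
        # quoting each value (the shared url_quote helper wraps urllib's quote).
        return '&'.join(
            '{0}={1}'.format(name, quote(value, safe=''))
            for name, value in query.items() if value is not None)

    token = build_sas_token({'ss': 'b', 'srt': 'sco', 'sp': 'rl',
                             'se': '2023-01-01T00:00:00Z', 'sv': '2019-07-07',
                             'sig': 'computed-signature-goes-here'})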
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - progress_hook=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - progress_hook=progress_hook, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - progress_hook=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - progress_hook=progress_hook, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - 
running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__( - self, service, - total_size, - chunk_size, - stream, - parallel, - encryptor=None, - padder=None, - progress_hook=None, - **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - self.progress_hook = progress_hook - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. 
- if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - if self.progress_hook: - self.progress_hook(self.progress_total, self.total_size) - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): 
- try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be 
corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. - if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. 
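# Editorial sketch (not part of the deleted sources): minimal illustration of how the
# SubStream class defined above partitions one shared, seekable stream into per-block
# windows guarded by a common lock, which is how the parallel uploaders read their
# slices. SubStream refers to the class above; the 4 MiB sizes and BytesIO source are
# placeholders for illustration only.
import threading
from io import BytesIO

source = BytesIO(b"\x00" * (8 * 1024 * 1024))   # 8 MiB of sample data
shared_lock = threading.Lock()

# Each window exposes only its own [offset, offset + length) slice of the wrapped stream.
first_window = SubStream(source, 0, 4 * 1024 * 1024, shared_lock)
second_window = SubStream(source, 4 * 1024 * 1024, 4 * 1024 * 1024, shared_lock)

assert len(first_window.read()) == 4 * 1024 * 1024   # reads only the first block
assert second_window.tell() == 0                      # positions are tracked per window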
- - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/uploads_async.py deleted file mode 100644 index 2d8376a..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared/uploads_async.py +++ /dev/null @@ -1,412 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' 
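# Editorial sketch (not part of the deleted sources): how the IterStreamer helper
# deleted above adapts a plain chunk generator into a minimal file-like object so
# generator input can feed the chunked uploaders. The generator and sizes below are
# illustrative assumptions, not values from the original code.
def sample_chunks():
    yield b"hello "
    yield "world"                      # text chunks are encoded with the configured encoding

stream = IterStreamer(sample_chunks(), encoding="UTF-8")
print(stream.read(5))                  # b'hello'
print(stream.read(20))                 # b' world' -- leftover bytes carry over between reads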
- - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - progress_hook=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - progress_hook=progress_hook, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - progress_hook=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - progress_hook=progress_hook, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__( - self, service, - total_size, - chunk_size, - stream, - parallel, - encryptor=None, - padder=None, - progress_hook=None, - **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = 
threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - self.progress_hook = progress_hook - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - if self.progress_hook: - self.progress_hook(self.progress_total, self.total_size) - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - 
kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - 
data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared_access_signature.py deleted file mode 100644 index e271f99..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_shared_access_signature.py +++ /dev/null @@ -1,393 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TYPE_CHECKING -) - -from azure.multiapi.storagev2.blob.v2021_06_08 import generate_account_sas as generate_blob_account_sas -from azure.multiapi.storagev2.blob.v2021_06_08 import generate_container_sas, generate_blob_sas -if TYPE_CHECKING: - from datetime import datetime - from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \ - UserDelegationKey - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for the DataLake service. - - Use the returned signature as the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The access key to generate the shared access signature. - :param resource_types: - Specifies the resource types that are accessible with the account SAS. - :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - return generate_blob_account_sas( - account_name=account_name, - account_key=account_key, - resource_types=resource_types, - permission=permission, - expiry=expiry, - **kwargs - ) - - -def generate_file_system_sas( - account_name, # type: str - file_system_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSystemSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file system. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdlmeop. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. - :return: A Shared Access Signature (sas) token. 
- :rtype: str - """ - return generate_container_sas( - account_name=account_name, - container_name=file_system_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) - - -def generate_directory_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a directory. - - Use the returned signature with the credential parameter of any DataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. - When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdlmeop. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. 
- :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. - :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - depth = len(directory_name.strip("/").split("/")) - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=directory_name, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - sdd=depth, - is_directory=True, - **kwargs) - - -def generate_file_sas( - account_name, # type: str - file_system_name, # type: str - directory_name, # type: str - file_name, # type: str - credential, # type: Union[str, UserDelegationKey] - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any BDataLakeServiceClient, - FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str file_system_name: - The name of the file system. - :param str directory_name: - The name of the directory. - :param str file_name: - The name of the file. - :param str credential: - Credential could be either account key or user delegation key. - If use account key is used as credential, then the credential type should be a str. - Instead of an account key, the user could also pass in a user delegation key. - A user delegation key can be obtained from the service by authenticating with an AAD identity; - this can be accomplished - by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. 
- When present, the SAS is signed with the user delegation key instead. - :type credential: str or ~azure.storage.filedatalake.UserDelegationKey - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered racwdmeop. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.filedatalake.FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :keyword start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :paramtype start: ~datetime.datetime or str - :keyword str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str preauthorized_agent_object_id: - The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform - the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the - user delegation key has the required permissions before granting access but no additional permission check for - the agent object id will be performed. - :keyword str agent_object_id: - The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to - perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner - of the user delegation key has the required permissions before granting access and the service will perform an - additional POSIX ACL check to determine if this user is authorized to perform the requested operation. 
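# Editorial sketch (not part of the deleted sources): hypothetical usage of the
# generate_file_system_sas helper defined earlier in this module, which delegates to the
# blob-package SAS generators. The account name, file system name, key placeholder,
# permission string, and one-hour lifetime are all illustrative assumptions; a real
# base64-encoded account key (or a UserDelegationKey) is required for signing.
from datetime import datetime, timedelta

sas_token = generate_file_system_sas(
    account_name="mystorageaccount",          # placeholder account
    file_system_name="myfilesystem",          # placeholder file system
    credential="<storage-account-key>",       # placeholder; real key or UserDelegationKey
    permission="rl",                          # read + list, ordered per racwdlmeop
    expiry=datetime.utcnow() + timedelta(hours=1),
)
# The returned token string is then passed as the credential of a FileSystemClient
# or appended to the resource URL as its query string.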
- :keyword str correlation_id: - The correlation id to correlate the storage audit logs with the audit logs used by the principal - generating and distributing the SAS. This can only be used when to generate sas with delegation key. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - if directory_name: - path = directory_name.rstrip('/') + "/" + file_name - else: - path = file_name - return generate_blob_sas( - account_name=account_name, - container_name=file_system_name, - blob_name=path, - account_key=credential if isinstance(credential, str) else None, - user_delegation_key=credential if not isinstance(credential, str) else None, - permission=permission, - expiry=expiry, - **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_upload_helper.py deleted file mode 100644 index 6d88c32..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_upload_helper.py +++ /dev/null @@ -1,104 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from ._deserialize import ( - process_storage_error) -from ._shared.response_handlers import return_response_headers -from ._shared.uploads import ( - upload_data_chunks, - DataLakeFileChunkUploader, upload_substream_blocks) -from azure.core.exceptions import HttpResponseError - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - file_settings=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - use_original_upload_path = 
file_settings.use_byte_buffer or \ - validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - else: - upload_substream_blocks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - return client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - close=True, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_version.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/_version.py deleted file mode 100644 index 23e2a0e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.7.0" diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/__init__.py deleted file mode 100644 index c24dde8..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._download_async import StorageStreamDownloader -from .._shared.policies_async import ExponentialRetry, LinearRetry -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._file_system_client_async import FileSystemClient -from ._data_lake_service_client_async import DataLakeServiceClient -from ._data_lake_lease_async import DataLakeLeaseClient - -__all__ = [ - 'DataLakeServiceClient', - 'FileSystemClient', - 'DataLakeDirectoryClient', - 'DataLakeFileClient', - 'DataLakeLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'StorageStreamDownloader' -] diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_directory_client_async.py deleted file mode 100644 index 31b9561..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_directory_client_async.py +++ /dev/null @@ -1,574 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Any, Dict, Optional, Union, - TYPE_CHECKING) - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore -from azure.core.pipeline import AsyncPipeline -from ._data_lake_file_client_async import DataLakeFileClient -from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase -from .._models import DirectoryProperties, FileProperties -from .._deserialize import deserialize_dir_properties -from ._path_client_async import PathClient -from .._shared.base_client_async import AsyncTransportWrapper - -if TYPE_CHECKING: - from datetime import datetime - - -class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): - """A client to interact with the DataLake directory, even if the directory may not yet exist. - - For operations relating to a specific subdirectory or file under the directory, a directory client or file client - can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param directory_name: - The whole path of the directory. eg. {directory under file system}/{directory to interact with} - :type directory_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_directory_client_from_conn_str] - :end-before: [END instantiate_directory_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - directory_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call - credential=credential, **kwargs) - - async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new directory. - - :param metadata: - Name-value pairs associated with the directory as metadata. 
- :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 8 - :caption: Create directory. - """ - return await self._create('directory', metadata=metadata, **kwargs) - - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._exists(**kwargs) - - async def delete_directory(self, **kwargs): - # type: (...) -> None - """ - Marks the specified directory for deletion. - - :keyword lease: - Required if the directory has an active lease. 
Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 4 - :caption: Delete directory. - """ - return await self._delete(recursive=True, **kwargs) - - async def get_directory_properties(self, **kwargs): - # type: (**Any) -> DirectoryProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the directory. It does not return the content of the directory. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - Required if the directory was created with a customer-provided key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START get_directory_properties] - :end-before: [END get_directory_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file/directory. - """ - return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs) # pylint: disable=protected-access - - async def rename_directory(self, new_name, # type: str - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Rename the source directory. - - :param str new_name: - the new directory name the user want to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}". - :keyword source_lease: - A lease ID for the source path. If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_directory_async.py - :start-after: [START rename_directory] - :end-before: [END rename_directory] - :language: python - :dedent: 4 - :caption: Rename the source directory. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new directory") - if not self._raw_credential and new_file_system == self.file_system_name: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = DataLakeDirectoryClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, - credential=self._raw_credential or new_dir_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_directory_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_directory_client - - async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create a subdirectory and return the subdirectory client to be interacted with. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory. - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.create_directory(metadata=metadata, **kwargs) - return subdir - - async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified subdirectory for deletion. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient for the subdirectory - """ - subdir = self.get_sub_directory_client(sub_directory) - await subdir.delete_directory(**kwargs) - return subdir - - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) 
-> DataLakeFileClient - """ - Create a new file and return the file client to be interacted with. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - def get_file_client(self, file # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. eg. 
directory/subdirectory/file - :type file: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_file_client] - :end-before: [END bsc_get_file_client] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file.get('name') - except AttributeError: - file_path = self.path_name + '/' + str(file) - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_sub_directory_client(self, sub_directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified subdirectory of the current directory. - - The sub subdirectory need not already exist. - - :param sub_directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_datalake_service_samples.py - :start-after: [START bsc_get_directory_client] - :end-before: [END bsc_get_directory_client] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - subdir_path = sub_directory.get('name') - except AttributeError: - subdir_path = self.path_name + '/' + str(sub_directory) - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient( - self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_file_client_async.py deleted file mode 100644 index aa24c6e..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_file_client_async.py +++ /dev/null @@ -1,606 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
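# --- Editor's illustrative aside (not part of the removed module above) ---
# A minimal sketch of how the directory client's get_sub_directory_client /
# get_file_client helpers shown above are typically used. The account URL,
# file system name, directory names and credential are placeholders, and the
# import uses the upstream azure-storage-file-datalake package rather than
# this vendored copy.
import asyncio
from azure.storage.filedatalake.aio import DataLakeDirectoryClient

async def directory_helpers_sketch():
    dir_client = DataLakeDirectoryClient(
        account_url="https://myaccount.dfs.core.windows.net",  # placeholder
        file_system_name="my-file-system",                     # placeholder
        directory_name="parent-dir",                           # placeholder
        credential="<sas-token-or-account-key>",               # placeholder
    )
    async with dir_client:
        # Neither the sub-directory nor the file has to exist yet; the helpers
        # only build clients that reuse the parent client's pipeline settings.
        sub_dir = dir_client.get_sub_directory_client("reports")
        await sub_dir.create_directory()
        file_client = dir_client.get_file_client("reports/summary.txt")
        await file_client.create_file()

asyncio.run(directory_helpers_sketch())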
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Any, AnyStr, Dict, IO, Iterable, Optional, Union, - TYPE_CHECKING) - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -from azure.core.exceptions import HttpResponseError -from ._download_async import StorageStreamDownloader -from ._path_client_async import PathClient -from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase -from .._serialize import convert_datetime_to_rfc1123 -from .._deserialize import process_storage_error, deserialize_file_properties -from .._models import FileProperties -from ..aio._upload_helper import upload_datalake_file - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ContentSettings - - -class DataLakeFileClient(PathClient, DataLakeFileClientBase): - """A client to interact with the DataLake file, even if the file may not yet exist. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :type file_path: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py - :start-after: [START instantiate_file_client_from_conn_str] - :end-before: [END instantiate_file_client_from_conn_str] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - file_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path, - credential=credential, **kwargs) - - async def create_file(self, content_settings=None, # type: Optional[ContentSettings] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create a new file. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. 
- :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: response dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 4 - :caption: Create file. - """ - return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) - - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._exists(**kwargs) - - async def delete_file(self, **kwargs): - # type: (...) -> None - """ - Marks the specified file for deletion. - - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 4 - :caption: Delete file. - """ - return await self._delete(**kwargs) - - async def get_file_properties(self, **kwargs): - # type: (**Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. It does not return the content of the file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - Required if the file was created with a customer-provided key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: FileProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START get_file_properties] - :end-before: [END get_file_properties] - :language: python - :dedent: 4 - :caption: Getting the properties for a file. - """ - return await self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access - - async def set_file_expiry(self, expiry_options, # type: str - expires_on=None, # type: Optional[Union[datetime, int]] - **kwargs): - # type: (...) -> None - """Sets the time a file will expire and be deleted. - - :param str expiry_options: - Required. Indicates mode of the expiry time. - Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' - :param datetime or int expires_on: - The time to set the file to expiry. - When expiry_options is RelativeTo*, expires_on should be an int in milliseconds - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - expires_on = convert_datetime_to_rfc1123(expires_on) - except AttributeError: - expires_on = str(expires_on) - await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on, - **kwargs) # pylint: disable=protected-access - - async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - length=None, # type: Optional[int] - overwrite=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Upload data to a file. - - :param data: Content to be uploaded to file - :param int length: Size of the data in bytes. - :param bool overwrite: to overwrite an existing file or not. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword metadata: - Name-value pairs associated with the blob as metadata. - :paramtype metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease: - Required if the blob has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. 
Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https, as https (the default), will - already validate. Note that this MD5 hash is not stored with the - blob. Also note that if enabled, the memory-efficient upload algorithm - will not be used because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword int chunk_size: - The maximum chunk size for uploading a file in chunks. - Defaults to 100*1024*1024, or 100MB. - :return: response dict (Etag and last modified). - """ - options = self._upload_options( - data, - length=length, - overwrite=overwrite, - **kwargs) - return await upload_datalake_file(**options) - - async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] - offset, # type: int - length=None, # type: Optional[int] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime, int]] - """Append data to the file. - - :param data: Content to be appended to file - :param offset: start position of the data to be appended to. - :param length: Size of the data in bytes. - :keyword bool validate_content: - If true, calculates an MD5 hash of the block content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :return: dict of the response header - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START append_data] - :end-before: [END append_data] - :language: python - :dedent: 4 - :caption: Append data to the file. 
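# --- Editor's illustrative aside (not part of the removed module above) ---
# A minimal append/flush sketch for the API documented above, assuming an
# already-constructed async DataLakeFileClient named `file_client` (see the
# upstream azure-storage-file-datalake package for client construction).
async def append_then_flush(file_client):
    payload = b"hello, data lake"
    await file_client.create_file()
    # append_data stages the bytes at the given offset within the file...
    await file_client.append_data(payload, offset=0, length=len(payload))
    # ...and flush_data commits everything written up to the new file length.
    await file_client.flush_data(len(payload))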
- """ - options = self._append_data_options( - data=data, - offset=offset, - scheme=self.scheme, - length=length, - **kwargs) - try: - return await self._client.path.append_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def flush_data(self, offset, # type: int - retain_uncommitted_data=False, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ Commit the previous appended data. - - :param offset: offset is equal to the length of the file after commit the - previous appended data. - :param bool retain_uncommitted_data: Valid only for flush operations. If - "true", uncommitted data is retained after the flush operation - completes; otherwise, the uncommitted data is deleted after the flush - operation. The default is false. Data at offsets less than the - specified position are written to the file when flush succeeds, but - this optional parameter allows data after the flush position to be - retained for a future flush operation. - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword bool close: Azure Storage Events allow applications to receive - notifications when files change. When Azure Storage Events are - enabled, a file changed event is raised. This event has a property - indicating whether this is the final change to distinguish the - difference between an intermediate flush to a file stream and the - final close of a file stream. The close query parameter is valid only - when the action is "flush" and change notifications are enabled. If - the value of close is "true" and the flush operation completes - successfully, the service raises a file change notification with a - property indicating that this is the final update (the file stream has - been closed). If "false" a change notification is raised indicating - the file has changed. The default is false. This query parameter is - set to true by the Hadoop ABFS driver to indicate that the file stream - has been closed." - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :return: response header in dict - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START upload_file_to_file_system] - :end-before: [END upload_file_to_file_system] - :language: python - :dedent: 12 - :caption: Commit the previous appended data. - """ - options = self._flush_data_options( - offset, - self.scheme, - retain_uncommitted_data=retain_uncommitted_data, **kwargs) - try: - return await self._client.path.flush_data(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def download_file(self, offset=None, length=None, **kwargs): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content, or readinto() must be used to download the file into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword lease: - If specified, download only succeeds if the file's lease is active - and matches this ID. Required if the file has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - Required if the file was created with a Customer-Provided Key. - :keyword int max_concurrency: - The number of parallel connections with which to download. - :keyword int timeout: - The timeout parameter is expressed in seconds. This method may make - multiple calls to the Azure service and the timeout will apply to - each call individually. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START read_file] - :end-before: [END read_file] - :language: python - :dedent: 4 - :caption: Return the downloaded data. 
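# --- Editor's illustrative aside (not part of the removed module above) ---
# Reading a file back with the download_file API documented above, assuming an
# already-constructed async DataLakeFileClient named `file_client`.
async def read_whole_file(file_client):
    downloader = await file_client.download_file()
    return await downloader.readall()  # or stream it with readinto() / chunks()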
- """ - downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs) - return StorageStreamDownloader(downloader) - - async def rename_file(self, new_name, **kwargs): - # type: (str, **Any) -> DataLakeFileClient - """ - Rename the source file. - - :param str new_name: the new file name the user wants to rename to. - The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}". - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. If specified, - the source path must have an active lease and the lease ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: the renamed file client - :rtype: DataLakeFileClient - - .. admonition:: Example: - - ..
literalinclude:: ../samples/datalake_samples_upload_download_async.py - :start-after: [START rename_file] - :end-before: [END rename_file] - :language: python - :dedent: 4 - :caption: Rename the source file. - """ - new_name = new_name.strip('/') - new_file_system = new_name.split('/')[0] - new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') - new_path = new_path_and_token[0] - try: - new_file_sas = new_path_and_token[1] or self._query_str.strip('?') - except IndexError: - if not self._raw_credential and new_file_system != self.file_system_name: - raise ValueError("please provide the sas token for the new file") - if not self._raw_credential and new_file_system == self.file_system_name: - new_file_sas = self._query_str.strip('?') - - new_file_client = DataLakeFileClient( - "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, - credential=self._raw_credential or new_file_sas, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - await new_file_client._rename_path( # pylint: disable=protected-access - '/{}/{}{}'.format(quote(unquote(self.file_system_name)), - quote(unquote(self.path_name)), - self._query_str), - **kwargs) - return new_file_client diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_lease_async.py deleted file mode 100644 index 1eeb698..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_lease_async.py +++ /dev/null @@ -1,243 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - TypeVar, TYPE_CHECKING -) -from azure.multiapi.storagev2.blob.v2021_06_08.aio import BlobLeaseClient -from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase - - -if TYPE_CHECKING: - FileSystemClient = TypeVar("FileSystemClient") - DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") - DataLakeFileClient = TypeVar("DataLakeFileClient") - - -class DataLakeLeaseClient(DataLakeLeaseClientBase): # pylint: disable=client-accepts-api-version-keyword - """Creates a new DataLakeLeaseClient. - - This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file system, directory, or file to lease. 
- :type client: ~azure.storage.filedatalake.aio.FileSystemClient or - ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None - super(DataLakeLeaseClient, self).__init__(client, lease_id) - - if hasattr(client, '_blob_client'): - _client = client._blob_client # type: ignore # pylint: disable=protected-access - elif hasattr(client, '_container_client'): - _client = client._container_client # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.") - - self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id) - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - async def acquire(self, lease_duration=-1, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. - - If the file/file system does not have an active lease, the DataLake service creates a - lease on the file/file system and returns a new lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) - self._update_lease_client_attributes() - - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the lease. - - The lease can be renewed if the lease ID specified in the - lease client matches that associated with the file system or file.
Note that - the lease may be renewed even if it has expired as long as the file system - or file has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.renew(**kwargs) - self._update_lease_client_attributes() - - async def release(self, **kwargs): - # type: (Any) -> None - """Release the lease. - - The lease may be released if the client lease id specified matches - that associated with the file system or file. Releasing the lease allows another client - to immediately acquire the lease for the file system or file as soon as the release is complete. - - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.release(**kwargs) - self._update_lease_client_attributes() - - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """Change the lease ID of an active lease. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) - self._update_lease_client_attributes() - - async def break_lease(self, lease_break_period=None, **kwargs): - # type: (Optional[int], Any) -> int - """Break the lease, if the file system or file has an active lease. - - Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. When a lease - is broken, the lease break period is allowed to elapse, during which time - no lease operation except break and release can be performed on the file system or file. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :param int lease_break_period: - This is the proposed duration of seconds that the lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the lease. If longer, the time remaining on the lease is used. - A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration lease breaks after the remaining lease - period elapses, and an infinite lease breaks immediately. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
- :rtype: int - """ - await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_service_client_async.py deleted file mode 100644 index f278468..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_data_lake_service_client_async.py +++ /dev/null @@ -1,513 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import Optional, Any, Dict - -from azure.core.paging import ItemPaged -from azure.core.pipeline import AsyncPipeline - -from azure.multiapi.storagev2.blob.v2021_06_08.aio import BlobServiceClient -from .._serialize import get_api_version -from .._generated.aio import AzureDataLakeStorageRESTAPI -from .._deserialize import get_datalake_service_properties -from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin -from ._file_system_client_async import FileSystemClient -from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase -from .._shared.policies_async import ExponentialRetry -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_file_client_async import DataLakeFileClient -from ._models import FileSystemPropertiesPaged -from .._models import UserDelegationKey, LocationMode - - -class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase): - """A client to interact with the DataLake Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete file systems within the account. - For operations relating to a specific file system, directory or file, clients for those entities - can also be retrieved using the `get_client` functions. - - :ivar str url: - The full endpoint URL to the datalake service endpoint. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URL to the DataLake storage account. Any other entities included - in the URL path (e.g. file system or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client] - :end-before: [END create_datalake_service_client] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient from connection string. - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_datalake_service_client_oauth] - :end-before: [END create_datalake_service_client_oauth] - :language: python - :dedent: 4 - :caption: Creating the DataLakeServiceClient with Azure Identity credentials. - """ - - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(DataLakeServiceClient, self).__init__( - account_url, - credential=credential, - **kwargs - ) - self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs) - self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access - self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) #pylint: disable=protected-access - self._loop = kwargs.get('loop', None) - - async def __aenter__(self): - await self._blob_service_client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._blob_service_client.close() - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_service_client.close() - - async def get_user_delegation_key(self, key_start_time, # type: datetime - key_expiry_time, # type: datetime - **kwargs # type: Any - ): - # type: (...) -> UserDelegationKey - """ - Obtain a user delegation key for the purpose of signing SAS tokens. - A token credential must be present on the service object for this request to succeed. - - :param ~datetime.datetime key_start_time: - A DateTime value. Indicates when the key becomes valid. - :param ~datetime.datetime key_expiry_time: - A DateTime value. Indicates when the key stops being valid. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The user delegation key. - :rtype: ~azure.storage.filedatalake.UserDelegationKey - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_user_delegation_key] - :end-before: [END get_user_delegation_key] - :language: python - :dedent: 8 - :caption: Get user delegation key from datalake service client. - """ - delegation_key = await self._blob_service_client.get_user_delegation_key( - key_start_time=key_start_time, - key_expiry_time=key_expiry_time, - **kwargs) # pylint: disable=protected-access - return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access - - def list_file_systems(self, name_starts_with=None, # type: Optional[str] - include_metadata=None, # type: Optional[bool] - **kwargs): - # type: (...) -> ItemPaged[FileSystemProperties] - """Returns a generator to list the file systems under the specified account. - - The generator will lazily follow the continuation tokens returned by - the service and stop when all file systems have been returned. 
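For orientation, a minimal sketch of driving this async service client end to end (illustrative only, not part of the deleted sources; the account URL, credential, and name prefix are placeholders, and the import path follows the vendored layout shown in this diff):

.. code-block:: python

    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.multiapi.storagev2.filedatalake.v2021_06_08.aio import DataLakeServiceClient


    async def main():
        credential = DefaultAzureCredential()
        # The service client is an async context manager; its sockets are closed on exit.
        async with DataLakeServiceClient(
                "https://myaccount.dfs.core.windows.net", credential=credential) as service:
            # list_file_systems returns an async iterable of FileSystemProperties.
            async for fs in service.list_file_systems(name_starts_with="logs"):
                print(fs.name)
        await credential.close()


    asyncio.run(main())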
- - :param str name_starts_with: - Filters the results to return only file systems whose names - begin with the specified prefix. - :param bool include_metadata: - Specifies that file system metadata be returned in the response. - The default value is `False`. - :keyword int results_per_page: - The maximum number of file system names to retrieve per API - call. If the request does not specify the server will return up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool include_deleted: - Specifies that deleted file systems to be returned in the response. This is for file system restore enabled - account. The default value is `False`. - .. versionadded:: 12.3.0 - :keyword bool include_system: - Flag specifying that system filesystems should be included. - .. versionadded:: 12.6.0 - :returns: An iterable (auto-paging) of FileSystemProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START list_file_systems] - :end-before: [END list_file_systems] - :language: python - :dedent: 8 - :caption: Listing the file systems in the datalake service. - """ - item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, - include_metadata=include_metadata, - **kwargs) # pylint: disable=protected-access - item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access - return item_paged - - async def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> FileSystemClient - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param str file_system: - The name of the file system to create. - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - Possible values include: file system, file. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START create_file_system_from_service_client] - :end-before: [END create_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Creating a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) - return file_system_client - - async def _rename_file_system(self, name, new_name, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str name: - The name of the filesystem to rename. - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. 
- :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - await self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = self.get_file_system_client(new_name) - return renamed_file_system - - async def undelete_file_system(self, name, deleted_version, **kwargs): - # type: (str, str, **Any) -> FileSystemClient - """Restores soft-deleted filesystem. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.3.0 - This operation was introduced in API version '2019-12-12'. - - :param str name: - Specifies the name of the deleted filesystem to restore. - :param str deleted_version: - Specifies the version of the deleted filesystem to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - new_name = kwargs.pop('new_name', None) - await self._blob_service_client.undelete_container(name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access - file_system = self.get_file_system_client(new_name or name) - return file_system - - async def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] - **kwargs): - # type: (...) -> FileSystemClient - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :param file_system: - The file system to delete. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START delete_file_system_from_service_client] - :end-before: [END delete_file_system_from_service_client] - :language: python - :dedent: 8 - :caption: Deleting a file system in the datalake service. - """ - file_system_client = self.get_file_system_client(file_system) - await file_system_client.delete_file_system(**kwargs) - return file_system_client - - def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] - ): - # type: (...) -> FileSystemClient - """Get a client to interact with the specified file system. - - The file system need not already exist. - - :param file_system: - The file system. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :returns: A FileSystemClient. - :rtype: ~azure.storage.filedatalake.aio.FileSystemClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Getting the file system client to interact with a specific file system. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, - _pipeline=self._pipeline, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] - directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param file_system: - The file system that the directory is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_directory_client_from_service_client] - :end-before: [END get_directory_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the directory client to interact with a specific directory. 
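A short sketch of how these accessor methods fit together (illustrative only; the account URL, credential, and file system, directory, and file names are placeholders):

.. code-block:: python

    from azure.multiapi.storagev2.filedatalake.v2021_06_08.aio import DataLakeServiceClient


    def get_clients(account_url, credential):
        # Sub-clients are created locally, without a round trip to the service;
        # the underlying file system, directory, and file need not exist yet.
        service = DataLakeServiceClient(account_url, credential=credential)
        file_system_client = service.get_file_system_client("my-file-system")
        directory_client = service.get_directory_client("my-file-system", "raw/2024")
        file_client = service.get_file_client("my-file-system", "raw/2024/data.csv")
        return service, file_system_client, directory_client, file_client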
- """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - directory_name = directory.name - except AttributeError: - directory_name = directory - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=self._pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function - ) - - def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] - file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_system: - The file system that the file is in. This can either be the name of the file system, - or an instance of FileSystemProperties. - :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties - :param file_path: - The file with which to interact. This can either be the full path of the file(from the root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_service_async.py - :start-after: [START get_file_client_from_service_client] - :end-before: [END get_file_client_from_service_client] - :language: python - :dedent: 8 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_system_name = file_system.name - except AttributeError: - file_system_name = file_system - try: - file_path = file_path.name - except AttributeError: - pass - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - - async def set_service_properties(self, **kwargs): - # type: (**Any) -> None - """Sets the properties of a storage account's Datalake service, including - Azure Storage Analytics. - - If an element (e.g. analytics_logging) is left as None, the - existing settings on the service for that functionality are preserved. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword analytics_logging: - Groups the Azure Analytics Logging settings. - :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging - :keyword hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates. 
-        :type hour_metrics: ~azure.storage.filedatalake.Metrics
-        :keyword minute_metrics:
-            The minute metrics settings provide request statistics
-            for each minute.
-        :type minute_metrics: ~azure.storage.filedatalake.Metrics
-        :keyword cors:
-            You can include up to five CorsRule elements in the
-            list. If an empty list is specified, all CORS rules will be deleted,
-            and CORS will be disabled for the service.
-        :type cors: list[~azure.storage.filedatalake.CorsRule]
-        :keyword str target_version:
-            Indicates the default version to use for requests if an incoming
-            request's version is not specified.
-        :keyword delete_retention_policy:
-            The delete retention policy specifies whether to retain deleted files/directories.
-            It also specifies the number of days and versions of file/directory to keep.
-        :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
-        :keyword static_website:
-            Specifies whether the static website feature is enabled,
-            and if yes, indicates the index document and 404 error document to use.
-        :type static_website: ~azure.storage.filedatalake.StaticWebsite
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :rtype: None
-        """
-        return await self._blob_service_client.set_service_properties(**kwargs)  # pylint: disable=protected-access
-
-    async def get_service_properties(self, **kwargs):
-        # type: (**Any) -> Dict[str, Any]
-        """Gets the properties of a storage account's datalake service, including
-        Azure Storage Analytics.
-
-        .. versionadded:: 12.4.0
-            This operation was introduced in API version '2020-06-12'.
-
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: An object containing datalake service properties such as
-            analytics logging, hour/minute metrics, cors rules, etc.
-        :rtype: Dict[str, Any]
-        """
-        props = await self._blob_service_client.get_service_properties(**kwargs)  # pylint: disable=protected-access
-        return get_datalake_service_properties(props)
diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_download_async.py
deleted file mode 100644
index 5685478..0000000
--- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_download_async.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# -------------------------------------------------------------------------
-from typing import AsyncIterator
-
-from .._deserialize import from_blob_properties
-
-
-class StorageStreamDownloader(object):
-    """A streaming object to download from Azure Storage.
-
-    :ivar str name:
-        The name of the file being downloaded.
-    :ivar ~azure.storage.filedatalake.FileProperties properties:
-        The properties of the file being downloaded. If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the file.
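To make the intended usage concrete, a minimal sketch of consuming this downloader (illustrative only; the names are placeholders, and the downloader is assumed to come from ``DataLakeFileClient.download_file()`` defined elsewhere in this package):

.. code-block:: python

    from azure.multiapi.storagev2.filedatalake.v2021_06_08.aio import DataLakeFileClient


    async def read_file(account_url, credential):
        file_client = DataLakeFileClient(
            account_url, "my-file-system", "raw/2024/data.csv", credential=credential)
        async with file_client:
            downloader = await file_client.download_file()
            print(downloader.name, downloader.size)
            data = await downloader.readall()  # whole payload in memory
            # For large files, stream instead of buffering everything:
            # async for chunk in downloader.chunks():
            #     handle(chunk)
        return data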
- """ - - def __init__(self, downloader): - self._downloader = downloader - self.name = self._downloader.name - self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access - self.size = self._downloader.size - - def __len__(self): - return self.size - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - """ - return self._downloader.chunks() - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - return await self._downloader.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_file_system_client_async.py deleted file mode 100644 index bd53cb9..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_file_system_client_async.py +++ /dev/null @@ -1,948 +0,0 @@ -# pylint: disable=too-many-lines -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, - TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace - -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncItemPaged - -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.multiapi.storagev2.blob.v2021_06_08.aio import ContainerClient -from .._serialize import get_api_version -from .._deserialize import process_storage_error, is_file_path -from .._generated.models import ListBlobsIncludeItem - -from ._data_lake_file_client_async import DataLakeFileClient -from ._data_lake_directory_client_async import DataLakeDirectoryClient -from ._data_lake_lease_async import DataLakeLeaseClient -from .._file_system_client import FileSystemClient as FileSystemClientBase -from .._generated.aio import AzureDataLakeStorageRESTAPI -from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry -from .._models import FileSystemProperties, PublicAccess, DirectoryProperties, FileProperties, DeletedPathProperties -from ._list_paths_helper import DeletedPathPropertiesPaged, PathPropertiesPaged - - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ( # pylint: disable=unused-import - ContentSettings) - - -class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase): - """A client to interact with a specific file system, even if that file system - may not yet exist. 
- - For operations relating to a specific directory or file within this file system, a directory client or file client - can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. - - :ivar str url: - The full endpoint URL to the file system, including SAS token if used. - :ivar str primary_endpoint: - The full primary endpoint URL. - :ivar str primary_hostname: - The hostname of the primary endpoint. - :param str account_url: - The URI to the storage account. - :param file_system_name: - The file system for the directory or files. - :type file_system_name: str - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system_client_from_service] - :end-before: [END create_file_system_client_from_service] - :language: python - :dedent: 8 - :caption: Get a FileSystemClient from an existing DataLakeServiceClient. - """ - - def __init__( - self, account_url, # type: str - file_system_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - super(FileSystemClient, self).__init__( - account_url, - file_system_name=file_system_name, - credential=credential, - **kwargs) - # to override the class field _container_client sync version - kwargs.pop('_hosts', None) - self._container_client = ContainerClient(self._blob_account_url, self.file_system_name, - credential=credential, - _hosts=self._container_client._hosts,# pylint: disable=protected-access - **kwargs) # type: ignore # pylint: disable=protected-access - self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, - file_system=self.file_system_name, pipeline=self._pipeline) - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url, - base_url=self._container_client.url, - file_system=self.file_system_name, - pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._container_client.close() - await super(FileSystemClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. 
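As a usage sketch (placeholder names; ``create_file_system``, ``exists``, ``acquire_lease``, and ``set_file_system_metadata`` are the methods defined below), the client can be driven through ``async with`` so that ``close()`` is handled automatically:

.. code-block:: python

    from azure.multiapi.storagev2.filedatalake.v2021_06_08.aio import FileSystemClient


    async def update_metadata(account_url, credential):
        async with FileSystemClient(account_url, "my-file-system", credential=credential) as fs:
            if not await fs.exists():
                await fs.create_file_system()
            # Hold a lease while touching file system metadata; it is released
            # automatically when the inner block exits.
            async with await fs.acquire_lease(lease_duration=30) as lease:
                await fs.set_file_system_metadata({"category": "test"}, lease=lease)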
- """ - await self._container_client.close() - await self.__aexit__() - - @distributed_trace_async - async def acquire_lease( - self, lease_duration=-1, # type: int - lease_id=None, # type: Optional[str] - **kwargs - ): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file system does not have an active lease, - the DataLake service creates a lease on the file system and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 12 - :caption: Acquiring a lease on the file_system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease - - @distributed_trace_async - async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] - public_access=None, # type: Optional[PublicAccess] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Creates a new file system under the specified account. - - If the file system with the same name already exists, a ResourceExistsError will - be raised. This method returns a client with which to interact with the newly - created file system. - - :param metadata: - A dict with name-value pairs to associate with the - file system as metadata. Example: `{'Category':'test'}` - :type metadata: dict(str, str) - :param public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :type public_access: ~azure.storage.filedatalake.PublicAccess - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary of response headers. 
- :rtype: Dict[str, Union[str, datetime]] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_system] - :end-before: [END create_file_system] - :language: python - :dedent: 16 - :caption: Creating a file system in the datalake service. - """ - return await self._container_client.create_container(metadata=metadata, - public_access=public_access, - **kwargs) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a file system exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._container_client.exists(**kwargs) - - @distributed_trace_async - async def _rename_file_system(self, new_name, **kwargs): - # type: (str, **Any) -> FileSystemClient - """Renames a filesystem. - - Operation is successful only if the source filesystem exists. - - :param str new_name: - The new filesystem name the user wants to rename to. - :keyword lease: - Specify this to perform only if the lease ID given - matches the active lease ID of the source filesystem. - :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.filedatalake.FileSystemClient - """ - await self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access - renamed_file_system = FileSystemClient( - "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, - credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, - _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, - require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function) - return renamed_file_system - - @distributed_trace_async - async def delete_file_system(self, **kwargs): - # type: (Any) -> None - """Marks the specified file system for deletion. - - The file system and any files contained within it are later deleted during garbage collection. - If the file system is not found, a ResourceNotFoundError will be raised. - - :keyword lease: - If specified, delete_file_system only succeeds if the - file system's lease is active and matches this ID. - Required if the file system has an active lease. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_system] - :end-before: [END delete_file_system] - :language: python - :dedent: 16 - :caption: Deleting a file system in the datalake service. - """ - await self._container_client.delete_container(**kwargs) - - @distributed_trace_async - async def get_file_system_properties(self, **kwargs): - # type: (Any) -> FileSystemProperties - """Returns all user-defined metadata and system properties for the specified - file system. The data returned does not include the file system's list of paths. - - :keyword lease: - If specified, get_file_system_properties only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Properties for the specified file system within a file system object. - :rtype: ~azure.storage.filedatalake.FileSystemProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_system_properties] - :end-before: [END get_file_system_properties] - :language: python - :dedent: 16 - :caption: Getting properties on the file system. - """ - container_properties = await self._container_client.get_container_properties(**kwargs) - return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access - - @distributed_trace_async - async def set_file_system_metadata( # type: ignore - self, metadata, # type: Dict[str, str] - **kwargs - ): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START set_file_system_metadata] - :end-before: [END set_file_system_metadata] - :language: python - :dedent: 16 - :caption: Setting metadata on the container. - """ - return await self._container_client.set_container_metadata(metadata=metadata, **kwargs) - - @distributed_trace_async - async def set_file_system_access_policy( - self, signed_identifiers, # type: Dict[str, AccessPolicy] - public_access=None, # type: Optional[Union[str, PublicAccess]] - **kwargs - ): # type: (...) -> Dict[str, Union[str, datetime]] - """Sets the permissions for the specified file system or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a file system may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the file system. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy] - :param ~azure.storage.filedatalake.PublicAccess public_access: - To specify whether data in the file system may be accessed publicly and the level of access. - :keyword lease: - Required if the file system has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified date/time. - :keyword ~datetime.datetime if_unmodified_since: - A datetime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: filesystem-updated property dict (Etag and last modified). - :rtype: dict[str, str or ~datetime.datetime] - """ - return await self._container_client.set_container_access_policy(signed_identifiers, - public_access=public_access, **kwargs) - - @distributed_trace_async - async def get_file_system_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the specified file system. - The permissions indicate whether file system data may be accessed publicly. - - :keyword lease: - If specified, get_file_system_access_policy only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. 
- :rtype: dict[str, Any] - """ - access_policy = await self._container_client.get_container_access_policy(**kwargs) - return { - 'public_access': PublicAccess._from_generated(access_policy['public_access']), # pylint: disable=protected-access - 'signed_identifiers': access_policy['signed_identifiers'] - } - - @distributed_trace - def get_paths(self, path=None, # type: Optional[str] - recursive=True, # type: Optional[bool] - max_results=None, # type: Optional[int] - **kwargs): - # type: (...) -> AsyncItemPaged[PathProperties] - """Returns a generator to list the paths(could be files or directories) under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - :param str path: - Filters the results to return only paths under the specified path. - :param int max_results: - An optional value that specifies the maximum - number of items to return per page. If omitted or greater than 5,000, the - response will include up to 5,000 items per page. - :keyword upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of PathProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_paths_in_file_system] - :end-before: [END get_paths_in_file_system] - :language: python - :dedent: 12 - :caption: List the blobs in the file system. - """ - timeout = kwargs.pop('timeout', None) - command = functools.partial( - self._client.file_system.list_paths, - path=path, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, recursive, path=path, max_results=max_results, - page_iterator_class=PathPropertiesPaged, **kwargs) - - @distributed_trace_async - async def create_directory(self, directory, # type: Union[DirectoryProperties, str] - metadata=None, # type: Optional[Dict[str, str]] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Create directory - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. 
- The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_directory_from_file_system] - :end-before: [END create_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Create directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.create_directory(metadata=metadata, **kwargs) - return directory_client - - @distributed_trace_async - async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] - **kwargs): - # type: (...) -> DataLakeDirectoryClient - """ - Marks the specified path for deletion. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_directory_from_file_system] - :end-before: [END delete_directory_from_file_system] - :language: python - :dedent: 12 - :caption: Delete directory in the file system. - """ - directory_client = self.get_directory_client(directory) - await directory_client.delete_directory(**kwargs) - return directory_client - - @distributed_trace_async - async def create_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Create file - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword str permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START create_file_from_file_system] - :end-before: [END create_file_from_file_system] - :language: python - :dedent: 12 - :caption: Create file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.create_file(**kwargs) - return file_client - - @distributed_trace_async - async def delete_file(self, file, # type: Union[FileProperties, str] - **kwargs): - # type: (...) -> DataLakeFileClient - """ - Marks the specified file for deletion. - - :param file: - The file with which to interact. This can either be the name of the file, - or an instance of FileProperties. - :type file: str or ~azure.storage.filedatalake.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: DataLakeFileClient - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START delete_file_from_file_system] - :end-before: [END delete_file_from_file_system] - :language: python - :dedent: 12 - :caption: Delete file in the file system. - """ - file_client = self.get_file_client(file) - await file_client.delete_file(**kwargs) - return file_client - - # TODO: Temporarily removing this for GA release. - # @distributed_trace_async - # async def delete_files(self, *files, **kwargs): - # # type: (...) -> AsyncIterator[AsyncHttpResponse] - # """Marks the specified files or empty directories for deletion. - - # The files/empty directories are later deleted during garbage collection. 
- - # If a delete retention policy is enabled for the service, then this operation soft deletes the - # files/empty directories and retains the files or snapshots for specified number of days. - # After specified number of days, files' data is removed from the service during garbage collection. - # Soft deleted files/empty directories are accessible through :func:`list_deleted_paths()`. - - # :param files: - # The files/empty directories to delete. This can be a single file/empty directory, or multiple values can - # be supplied, where each value is either the name of the file/directory (str) or - # FileProperties/DirectoryProperties. - - # .. note:: - # When the file/dir type is dict, here's a list of keys, value rules. - - # blob name: - # key: 'name', value type: str - # if the file modified or not: - # key: 'if_modified_since', 'if_unmodified_since', value type: datetime - # etag: - # key: 'etag', value type: str - # match the etag or not: - # key: 'match_condition', value type: MatchConditions - # lease: - # key: 'lease_id', value type: Union[str, LeaseClient] - # timeout for subrequest: - # key: 'timeout', value type: int - - # :type files: list[str], list[dict], - # or list[Union[~azure.storage.filedatalake.FileProperties, ~azure.storage.filedatalake.DirectoryProperties] - # :keyword ~datetime.datetime if_modified_since: - # A DateTime value. Azure expects the date value passed in to be UTC. - # If timezone is included, any non-UTC datetimes will be converted to UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - # Specify this header to perform the operation only - # if the resource has been modified since the specified time. - # :keyword ~datetime.datetime if_unmodified_since: - # A DateTime value. Azure expects the date value passed in to be UTC. - # If timezone is included, any non-UTC datetimes will be converted to UTC. - # If a date is passed in without timezone info, it is assumed to be UTC. - # Specify this header to perform the operation only if - # the resource has not been modified since the specified date/time. - # :keyword bool raise_on_any_failure: - # This is a boolean param which defaults to True. When this is set, an exception - # is raised even if there is a single operation failure. - # :keyword int timeout: - # The timeout parameter is expressed in seconds. - # :return: An iterator of responses, one for each blob in order - # :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse] - - # .. admonition:: Example: - - # .. literalinclude:: ../samples/datalake_samples_file_system_async.py - # :start-after: [START batch_delete_files_or_empty_directories] - # :end-before: [END batch_delete_files_or_empty_directories] - # :language: python - # :dedent: 4 - # :caption: Deleting multiple files or empty directories. - # """ - # return await self._container_client.delete_blobs(*files, **kwargs) - - @distributed_trace_async - async def _undelete_path(self, deleted_path_name, deletion_id, **kwargs): - # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient] - """Restores soft-deleted path. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :param str deleted_path_name: - Specifies the name of the deleted container to restore. - :param str deletion_id: - Specifies the version of the deleted container to restore. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.file.datalake.aio.DataLakeDirectoryClient - or azure.storage.file.datalake.aio.DataLakeFileClient - """ - _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id) - - pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - path_client = AzureDataLakeStorageRESTAPI( - url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline) - try: - is_file = await path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs) - if is_file: - return self.get_file_client(deleted_path_name) - return self.get_directory_client(deleted_path_name) - except HttpResponseError as error: - process_storage_error(error) - - def _get_root_directory_client(self): - # type: () -> DataLakeDirectoryClient - """Get a client to interact with the root directory. - - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - """ - return self.get_directory_client('/') - - def get_directory_client(self, directory # type: Union[DirectoryProperties, str] - ): - # type: (...) -> DataLakeDirectoryClient - """Get a client to interact with the specified directory. - - The directory need not already exist. - - :param directory: - The directory with which to interact. This can either be the name of the directory, - or an instance of DirectoryProperties. - :type directory: str or ~azure.storage.filedatalake.DirectoryProperties - :returns: A DataLakeDirectoryClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_directory_client_from_file_system] - :end-before: [END get_directory_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the directory client to interact with a specific directory. - """ - try: - directory_name = directory.get('name') - except AttributeError: - directory_name = str(directory) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name, - credential=self._raw_credential, - api_version=self.api_version, - _configuration=self._config, _pipeline=_pipeline, - _hosts=self._hosts, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, - loop=self._loop - ) - - def get_file_client(self, file_path # type: Union[FileProperties, str] - ): - # type: (...) -> DataLakeFileClient - """Get a client to interact with the specified file. - - The file need not already exist. - - :param file_path: - The file with which to interact. This can either be the path of the file(from root directory), - or an instance of FileProperties. eg. directory/subdirectory/file - :type file_path: str or ~azure.storage.filedatalake.FileProperties - :returns: A DataLakeFileClient. - :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/datalake_samples_file_system_async.py - :start-after: [START get_file_client_from_file_system] - :end-before: [END get_file_client_from_file_system] - :language: python - :dedent: 12 - :caption: Getting the file client to interact with a specific file. - """ - try: - file_path = file_path.get('name') - except AttributeError: - file_path = str(file_path) - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return DataLakeFileClient( - self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential, - api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - key_resolver_function=self.key_resolver_function, loop=self._loop) - - @distributed_trace - def list_deleted_paths(self, **kwargs): - # type: (Any) -> AsyncItemPaged[DeletedPathProperties] - """Returns a generator to list the deleted (file or directory) paths under the specified file system. - The generator will lazily follow the continuation tokens returned by - the service. - - .. versionadded:: 12.4.0 - This operation was introduced in API version '2020-06-12'. - - :keyword str path_prefix: - Filters the results to return only paths under the specified path. - :keyword int results_per_page: - An optional value that specifies the maximum number of items to return per page. - If omitted or greater than 5,000, the response will include up to 5,000 items per page. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) response of DeletedPathProperties. - :rtype: - ~azure.core.paging.AsyncItemPaged[~azure.storage.filedatalake.DeletedPathProperties] - """ - path_prefix = kwargs.pop('path_prefix', None) - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment, - showonly=ListBlobsIncludeItem.deleted, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged, - results_per_page=results_per_page, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_list_paths_helper.py deleted file mode 100644 index 74ce2d7..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_list_paths_helper.py +++ /dev/null @@ -1,177 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
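The file-system client methods removed above (``create_file``/``delete_file``, ``get_file_client``/``get_directory_client`` and ``list_deleted_paths``) follow the same shape in the upstream ``azure-storage-file-datalake`` async package that these vendored modules mirror. A minimal sketch of driving that surface is shown below; the account URL, file system name, paths and credential are placeholder assumptions.

.. code-block:: python

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def main():
        # Placeholders: substitute a real account URL, file system and credential.
        fs_client = FileSystemClient(
            account_url="https://<account>.dfs.core.windows.net",
            file_system_name="my-filesystem",
            credential="<account-key-or-sas-token>")
        async with fs_client:
            # create_file/delete_file return the matching DataLakeFileClient,
            # as in the deleted module above.
            file_client = await fs_client.create_file("folder/report.txt")
            await file_client.upload_data(b"hello", overwrite=True)
            await fs_client.delete_file("folder/report.txt")

            # Requires a delete retention policy on the account (API >= 2020-06-12).
            async for deleted in fs_client.list_deleted_paths(path_prefix="folder/"):
                print(deleted.name, deleted.deletion_id)

    asyncio.run(main())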
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from azure.core.exceptions import HttpResponseError -from azure.core.async_paging import AsyncPageIterator - -from .._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code, \ - return_headers_and_deserialized_path_list -from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix - -from .._shared.models import DictMixin -from .._shared.response_handlers import return_context_and_deserialized -from .._generated.models import Path -from .._models import PathProperties - - -class DeletedPathPropertiesPaged(AsyncPageIterator): - """An Iterable of deleted path properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A path name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties) - :ivar str container: The container that the paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - - :param callable command: Function to retrieve the next page of items. - """ - def __init__( - self, command, - container=None, - prefix=None, - results_per_page=None, - continuation_token=None, - delimiter=None, - location_mode=None): - super(DeletedPathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.container = container - self.delimiter = delimiter - self.current_page = None - self.location_mode = location_mode - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - prefix=self.prefix, - marker=continuation_token or None, - max_results=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.container = self._response.container_name - self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items - self.current_page = [self._build_item(item) for item in self.current_page] - self.delimiter = self._response.delimiter - - return self._response.next_marker or None, self.current_page - - def _build_item(self, item): - if isinstance(item, BlobItemInternal): - file_props = get_deleted_path_properties_from_generated_code(item) - file_props.file_system = self.container - return file_props - if isinstance(item, GenBlobPrefix): - return DirectoryPrefix( - container=self.container, - 
prefix=item.name, - results_per_page=self.results_per_page, - location_mode=self.location_mode) - return item - - -class DirectoryPrefix(DictMixin): - """Directory prefix. - - :ivar str name: Name of the deleted directory. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar str file_system: The file system that the deleted paths are listed from. - :ivar str delimiter: A delimiting character used for hierarchy listing. - """ - def __init__(self, **kwargs): - self.name = kwargs.get('prefix') - self.results_per_page = kwargs.get('results_per_page') - self.file_system = kwargs.get('container') - self.delimiter = kwargs.get('delimiter') - self.location_mode = kwargs.get('location_mode') - - -class PathPropertiesPaged(AsyncPageIterator): - """An Iterable of Path properties. - - :ivar str path: Filters the results to return only paths under the specified path. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar list(~azure.storage.filedatalake.PathProperties) current_page: The current page of listed results. - - :param callable command: Function to retrieve the next page of items. - :param str path: Filters the results to return only paths under the specified path. - :param int max_results: The maximum number of psths to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - - def __init__( - self, command, - recursive, - path=None, - max_results=None, - continuation_token=None, - upn=None): - super(PathPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.recursive = recursive - self.results_per_page = max_results - self.path = path - self.upn = upn - self.current_page = None - self.path_list = None - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - self.recursive, - continuation=continuation_token or None, - path=self.path, - max_results=self.results_per_page, - upn=self.upn, - cls=return_headers_and_deserialized_path_list) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.path_list, self._response = get_next_return - self.current_page = [self._build_item(item) for item in self.path_list] - - return self._response['continuation'] or None, self.current_page - - @staticmethod - def _build_item(item): - if isinstance(item, PathProperties): - return item - if isinstance(item, Path): - path = PathProperties._from_generated(item) # pylint: disable=protected-access - return path - return item diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_models.py deleted file mode 100644 index 36c0b2b..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_models.py +++ /dev/null @@ -1,41 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
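``PathPropertiesPaged`` and ``DeletedPathPropertiesPaged`` above are ``AsyncPageIterator`` subclasses, so listings can be resumed from the continuation token they carry. A hedged sketch of that pattern against the upstream async ``FileSystemClient`` (account details and paths are placeholders):

.. code-block:: python

    import asyncio
    from azure.storage.filedatalake.aio import FileSystemClient

    async def list_one_page(continuation_token=None):
        fs_client = FileSystemClient(
            "https://<account>.dfs.core.windows.net", "my-filesystem",
            credential="<account-key-or-sas-token>")
        async with fs_client:
            # get_paths() is backed by a pager equivalent to PathPropertiesPaged;
            # by_page() exposes the continuation token so listing can resume later.
            pager = fs_client.get_paths(path="folder", recursive=True).by_page(
                continuation_token=continuation_token)
            async for page in pager:
                async for path in page:
                    print(path.name, path.is_directory)
                break  # stop after a single page
            return pager.continuation_token  # None once the listing is exhausted

    asyncio.run(list_one_page())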
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines -from azure.multiapi.storagev2.blob.v2021_06_08.aio._models import ContainerPropertiesPaged -from .._models import FileSystemProperties - - -class FileSystemPropertiesPaged(ContainerPropertiesPaged): - """An Iterable of File System properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file system name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only file systems whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of file system names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - - def __init__(self, *args, **kwargs): - super(FileSystemPropertiesPaged, self).__init__( - *args, - **kwargs - ) - - @staticmethod - def _build_item(item): - return FileSystemProperties._from_generated(item) # pylint: disable=protected-access diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_path_client_async.py deleted file mode 100644 index 46fe18f..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_path_client_async.py +++ /dev/null @@ -1,770 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
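``FileSystemPropertiesPaged`` above simply reuses the blob ``ContainerPropertiesPaged`` and converts each generated item into a ``FileSystemProperties``. A minimal sketch of the listing it backs, via the upstream async ``DataLakeServiceClient`` (account URL, name prefix and credential are placeholders):

.. code-block:: python

    import asyncio
    from azure.storage.filedatalake.aio import DataLakeServiceClient

    async def main():
        service = DataLakeServiceClient(
            "https://<account>.dfs.core.windows.net",
            credential="<account-key-or-sas-token>")
        async with service:
            # Each item is a FileSystemProperties produced by the pager shown above.
            async for fs in service.list_file_systems(
                    name_starts_with="logs-", results_per_page=100):
                print(fs.name, fs.last_modified)

    asyncio.run(main())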
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from datetime import datetime -from typing import ( # pylint: disable=unused-import - Any, Dict, Optional, Union, - TYPE_CHECKING) - -from azure.core.exceptions import AzureError, HttpResponseError -from azure.multiapi.storagev2.blob.v2021_06_08.aio import BlobClient -from .._serialize import get_api_version -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._path_client import PathClient as PathClientBase -from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \ - AccessControlChangeCounters, AccessControlChanges -from .._generated.aio import AzureDataLakeStorageRESTAPI -from ._data_lake_lease_async import DataLakeLeaseClient -from .._deserialize import process_storage_error -from .._shared.policies_async import ExponentialRetry - -if TYPE_CHECKING: - from .._models import ContentSettings - from .._models import FileProperties - -_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( - 'The require_encryption flag is set, but encryption is not supported' - ' for this method.') - - -class PathClient(AsyncStorageAccountHostsMixin, PathClientBase): - """A base client for interacting with a DataLake file/directory, even if the file/directory may not - yet exist. - - :param str account_url: - The URI to the storage account. - :param str file_system_name: - The file system for the directory or files. - :param str file_path: - The whole file path, so that to interact with a specific file. - eg. "{directory}/{subdirectory}/{file}" - :param credential: - The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials, an account - shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - """ - def __init__( - self, account_url, # type: str - file_system_name, # type: str - path_name, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - - super(PathClient, self).__init__(account_url, # pylint: disable=specify-parameter-names-in-call - file_system_name, path_name, - credential=credential, - **kwargs) # type: ignore - - kwargs.pop('_hosts', None) - - self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=self.file_system_name, - blob_name=self.path_name, - credential=credential, - _hosts=self._blob_client._hosts, # pylint: disable=protected-access - **kwargs) - - self._client = AzureDataLakeStorageRESTAPI(self.url, base_url=self.url, file_system=self.file_system_name, - path=self.path_name, pipeline=self._pipeline) - self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._blob_client.url, - base_url=self._blob_client.url, - file_system=self.file_system_name, - path=self.path_name, - pipeline=self._pipeline) - api_version = get_api_version(kwargs) - self._client._config.version = api_version # pylint: disable=protected-access - self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access - - self._loop = kwargs.get('loop', None) - - async def __aexit__(self, *args): - await self._blob_client.close() - await super(PathClient, self).__aexit__(*args) - - async def close(self): - # type: () -> None - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._blob_client.close() - await self.__aexit__() - - async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Create directory or file - - :param resource_type: - Required for Create File and Create Directory. - The value must be "file" or "directory". Possible values include: - 'directory', 'file' - :type resource_type: str - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :param metadata: - Name-value pairs associated with the file/directory as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword str umask: - Optional and only valid if Hierarchical Namespace is enabled for the account. - When creating a file or directory and the parent folder does not have a default ACL, - the umask restricts the permissions of the file or directory to be created. - The resulting permission is given by p & ^u, where p is the permission and u is the umask. - For example, if p is 0777 and u is 0057, then the resulting permission is 0720. - The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. - The umask must be specified in 4-digit octal notation (e.g. 0766). - :keyword permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - :type permissions: str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Dict[str, Union[str, datetime]] - """ - options = self._create_path_options( - resource_type, - content_settings=content_settings, - metadata=metadata, - **kwargs) - try: - return await self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def _delete(self, **kwargs): - # type: (**Any) -> Dict[Union[datetime, str]] - """ - Marks the specified path for deletion. - - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - options = self._delete_path_options(**kwargs) - try: - return await self._client.path.delete(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def set_access_control(self, owner=None, # type: Optional[str] - group=None, # type: Optional[str] - permissions=None, # type: Optional[str] - acl=None, # type: Optional[str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """ - Set the owner, group, permissions, or access control list for a path. 
- - :param owner: - Optional. The owner of the file or directory. - :type owner: str - :param group: - Optional. The owning group of the file or directory. - :type group: str - :param permissions: - Optional and only valid if Hierarchical Namespace - is enabled for the account. Sets POSIX access permissions for the file - owner, the file owning group, and others. Each class may be granted - read, write, or execute permission. The sticky bit is also supported. - Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are - supported. - permissions and acl are mutually exclusive. - :type permissions: str - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - permissions and acl are mutually exclusive. - :type acl: str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict (Etag and last modified). - """ - options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) - try: - return await self._client.path.set_access_control(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def get_access_control(self, upn=None, # type: Optional[bool] - **kwargs): - # type: (...) -> Dict[str, Any] - """ - Get the owner, group, permissions, or access control list for a path. - - :param upn: - Optional. Valid only when Hierarchical Namespace is - enabled for the account. If "true", the user identity values returned - in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be - transformed from Azure Active Directory Object IDs to User Principal - Names. If "false", the values will be returned as Azure Active - Directory Object IDs. The default value is false. Note that group and - application Object IDs are not translated because they do not have - unique friendly names. - :type upn: bool - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. 
- :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword: response dict. - """ - options = self._get_access_control_options(upn=upn, **kwargs) - try: - return await self._client.path.get_properties(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def set_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Sets the Access Control on a path and sub-paths. - - :param acl: - Sets POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. 
- :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def update_access_control_recursive(self, acl, **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Modifies the Access Control on a path and sub-paths. - - :param acl: - Modifies POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, a user or - group identifier, and permissions in the format - "[scope:][type]:[id]:[permissions]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single, - change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. - """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def remove_access_control_recursive(self, - acl, - **kwargs): - # type: (str, **Any) -> AccessControlChangeResult - """ - Removes the Access Control on a path and sub-paths. 
- - :param acl: - Removes POSIX access control rights on files and directories. - The value is a comma-separated list of access control entries. Each - access control entry (ACE) consists of a scope, a type, and a user or - group identifier in the format "[scope:][type]:[id]". - :type acl: str - :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: - Callback where the caller can track progress of the operation - as well as collect paths that failed to change Access Control. - :keyword str continuation_token: - Optional continuation token that can be used to resume previously stopped operation. - :keyword int batch_size: - Optional. If data set size exceeds batch size then operation will be split into multiple - requests so that progress can be tracked. Batch size should be between 1 and 2000. - The default when unspecified is 2000. - :keyword int max_batches: - Optional. Defines maximum number of batches that single change Access Control operation can execute. - If maximum is reached before all sub-paths are processed, - then continuation token can be used to resume operation. - Empty value indicates that maximum number of batches in unbound and operation continues till end. - :keyword bool continue_on_failure: - If set to False, the operation will terminate quickly on encountering user errors (4XX). - If True, the operation will ignore user errors and proceed with the operation on other sub-entities of - the directory. - Continuation token will only be returned when continue_on_failure is True in case of user errors. - If not set the default value is False for this. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: A summary of the recursive operations, including the count of successes and failures, - as well as a continuation token in case the operation was terminated prematurely. - :rtype: :~azure.storage.filedatalake.AccessControlChangeResult` - :raises ~azure.core.exceptions.AzureError: - User can restart the operation using continuation_token field of AzureError if the token is available. 
- """ - if not acl: - raise ValueError("The Access Control List must be set for this operation") - - progress_hook = kwargs.pop('progress_hook', None) - max_batches = kwargs.pop('max_batches', None) - options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs) - return await self._set_access_control_internal(options=options, progress_hook=progress_hook, - max_batches=max_batches) - - async def _set_access_control_internal(self, options, progress_hook, max_batches=None): - try: - continue_on_failure = options.get('force_flag') - total_directories_successful = 0 - total_files_success = 0 - total_failure_count = 0 - batch_count = 0 - last_continuation_token = None - current_continuation_token = None - continue_operation = True - while continue_operation: - headers, resp = await self._client.path.set_access_control_recursive(**options) - - # make a running tally so that we can report the final results - total_directories_successful += resp.directories_successful - total_files_success += resp.files_successful - total_failure_count += resp.failure_count - batch_count += 1 - current_continuation_token = headers['continuation'] - - if current_continuation_token is not None: - last_continuation_token = current_continuation_token - - if progress_hook is not None: - await progress_hook(AccessControlChanges( - batch_counters=AccessControlChangeCounters( - directories_successful=resp.directories_successful, - files_successful=resp.files_successful, - failure_count=resp.failure_count, - ), - aggregate_counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count, - ), - batch_failures=[AccessControlChangeFailure( - name=failure.name, - is_directory=failure.type == 'DIRECTORY', - error_message=failure.error_message) for failure in resp.failed_entries], - continuation=last_continuation_token)) - - # update the continuation token, if there are more operations that cannot be completed in a single call - max_batches_satisfied = (max_batches is not None and batch_count == max_batches) - continue_operation = bool(current_continuation_token) and not max_batches_satisfied - options['continuation'] = current_continuation_token - - # currently the service stops on any failure, so we should send back the last continuation token - # for the user to retry the failed updates - # otherwise we should just return what the service gave us - return AccessControlChangeResult(counters=AccessControlChangeCounters( - directories_successful=total_directories_successful, - files_successful=total_files_success, - failure_count=total_failure_count), - continuation=last_continuation_token - if total_failure_count > 0 and not continue_on_failure else current_continuation_token) - except HttpResponseError as error: - error.continuation_token = last_continuation_token - process_storage_error(error) - except AzureError as error: - error.continuation_token = last_continuation_token - raise error - - async def _rename_path(self, rename_source, **kwargs): - # type: (str, **Any) -> Dict[str, Any] - """ - Rename directory or file - - :param rename_source: The value must have the following format: "/{filesystem}/{path}". - :type rename_source: str - :keyword ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set path properties. - :keyword source_lease: A lease ID for the source path. 
If specified, - the source path must have an active lease and the leaase ID must - match. - :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword lease: - Required if the file/directory has an active lease. Value can be a LeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._rename_path_options( - rename_source, - **kwargs) - try: - return await self._client.path.create(**options) - except HttpResponseError as error: - process_storage_error(error) - - async def _get_path_properties(self, **kwargs): - # type: (**Any) -> Union[FileProperties, DirectoryProperties] - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file or directory. It does not return the content of the directory or file. - - :keyword lease: - Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. 
- If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Decrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - Required if the file/directory was created with a customer-provided key. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: DirectoryProperties or FileProperties - """ - path_properties = await self._blob_client.get_blob_properties(**kwargs) - return path_properties - - async def _exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a path exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: boolean - """ - return await self._blob_client.exists(**kwargs) - - async def set_metadata(self, metadata, # type: Dict[str, str] - **kwargs): - # type: (...) -> Dict[str, Union[str, datetime]] - """Sets one or more user-defined name-value pairs for the specified - file system. Each call to this operation replaces all existing metadata - attached to the file system. To remove all metadata from the file system, - call this operation with no metadata dict. - - :param metadata: - A dict containing name-value pairs to associate with the file system as - metadata. Example: {'category':'test'} - :type metadata: dict[str, str] - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. 
- :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword ~azure.storage.filedatalake.CustomerProvidedEncryptionKey cpk: - Encrypts the data on the service-side with the given key. - Use of customer-provided keys must be done over HTTPS. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file system-updated property dict (Etag and last modified). - """ - return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs) - - async def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings] - **kwargs): - # type: (...) -> Dict[str, Any] - """Sets system properties on the file or directory. - - If one property is set for the content_settings, all properties will be overriden. - - :param ~azure.storage.filedatalake.ContentSettings content_settings: - ContentSettings object used to set file/directory properties. - :keyword lease: - If specified, set_file_system_metadata only succeeds if the - file system's lease is active and matches this ID. - :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: file/directory-updated property dict (Etag and last modified) - :rtype: Dict[str, Any] - """ - return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs) - - async def acquire_lease(self, lease_duration=-1, # type: Optional[int] - lease_id=None, # type: Optional[str] - **kwargs): - # type: (...) -> DataLakeLeaseClient - """ - Requests a new lease. If the file or directory does not have an active lease, - the DataLake service creates a lease on the file/directory and returns a new - lease ID. - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The DataLake service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. - :keyword ~datetime.datetime if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. 
- If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only - if the resource has been modified since the specified time. - :keyword ~datetime.datetime if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this header to perform the operation only if - the resource has not been modified since the specified date/time. - :keyword str etag: - An ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions match_condition: - The match condition to use upon the etag. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A DataLakeLeaseClient object, that can be run in a context manager. - :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/test_file_system_samples.py - :start-after: [START acquire_lease_on_file_system] - :end-before: [END acquire_lease_on_file_system] - :language: python - :dedent: 8 - :caption: Acquiring a lease on the file_system. - """ - lease = DataLakeLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(lease_duration=lease_duration, **kwargs) - return lease diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_upload_helper.py deleted file mode 100644 index 00d5bf1..0000000 --- a/azure/multiapi/storagev2/filedatalake/v2021_06_08/aio/_upload_helper.py +++ /dev/null @@ -1,103 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from azure.core.exceptions import HttpResponseError -from .._deserialize import ( - process_storage_error) -from .._shared.response_handlers import return_response_headers -from .._shared.uploads_async import ( - upload_data_chunks, - DataLakeFileChunkUploader, upload_substream_blocks) - - -def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument - return any([ - modified_access_conditions.if_modified_since, - modified_access_conditions.if_unmodified_since, - modified_access_conditions.if_none_match, - modified_access_conditions.if_match - ]) - - -async def upload_datalake_file( # pylint: disable=unused-argument - client=None, - stream=None, - length=None, - overwrite=None, - validate_content=None, - max_concurrency=None, - file_settings=None, - **kwargs): - try: - if length == 0: - return {} - properties = kwargs.pop('properties', None) - umask = kwargs.pop('umask', None) - permissions = kwargs.pop('permissions', None) - path_http_headers = kwargs.pop('path_http_headers', None) - modified_access_conditions = kwargs.pop('modified_access_conditions', None) - chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) - - if not overwrite: - # if customers didn't specify access conditions, they cannot flush data to existing file - if not _any_conditions(modified_access_conditions): - modified_access_conditions.if_none_match = '*' - if properties or umask or permissions: - raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled") - - if overwrite: - response = await client.create( - resource='file', - path_http_headers=path_http_headers, - properties=properties, - modified_access_conditions=modified_access_conditions, - umask=umask, - permissions=permissions, - cls=return_response_headers, - **kwargs) - - # this modified_access_conditions will be applied to flush_data to make sure - # no other flush between create and the current flush - modified_access_conditions.if_match = response['etag'] - modified_access_conditions.if_none_match = None - modified_access_conditions.if_modified_since = None - modified_access_conditions.if_unmodified_since = None - - use_original_upload_path = file_settings.use_byte_buffer or \ - validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ - hasattr(stream, 'seekable') and not stream.seekable() or \ - not hasattr(stream, 'seek') or not hasattr(stream, 'tell') - - if use_original_upload_path: - await upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) - else: - await upload_substream_blocks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - max_concurrency=max_concurrency, - stream=stream, - validate_content=validate_content, - **kwargs - ) - - return await client.flush_data(position=length, - path_http_headers=path_http_headers, - modified_access_conditions=modified_access_conditions, - close=True, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2021_06_08/py.typed b/azure/multiapi/storagev2/filedatalake/v2021_06_08/py.typed deleted file mode 100644 index e69de29..0000000 diff --git 
a/azure/multiapi/storagev2/fileshare/v2019_02_02/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/__init__.py deleted file mode 100644 index 706b8a8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/__init__.py +++ /dev/null @@ -1,66 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._version import VERSION -from ._file_client import ShareFileClient -from ._directory_client import ShareDirectoryClient -from ._share_client import ShareClient -from ._share_service_client import ShareServiceClient -from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import ( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode) -from ._models import ( - ShareProperties, - DirectoryProperties, - Handle, - FileProperties, - Metrics, - RetentionPolicy, - CorsRule, - AccessPolicy, - FileSasPermissions, - ShareSasPermissions, - ContentSettings, - NTFSAttributes) -from ._generated.models import ( - HandleItem -) - -__version__ = VERSION - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageErrorCode', - 'Metrics', - 'RetentionPolicy', - 'CorsRule', - 'AccessPolicy', - 'FileSasPermissions', - 'ShareSasPermissions', - 'ShareProperties', - 'DirectoryProperties', - 'FileProperties', - 'ContentSettings', - 'Handle', - 'NTFSAttributes', - 'HandleItem', - 'generate_account_sas', - 'generate_share_sas', - 'generate_file_sas' -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_deserialize.py deleted file mode 100644 index 5475e6d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_deserialize.py +++ /dev/null @@ -1,64 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from ._models import ShareProperties, DirectoryProperties, FileProperties -from ._shared.response_handlers import deserialize_metadata - - -def deserialize_share_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - share_properties = ShareProperties( - metadata=metadata, - **headers - ) - return share_properties - - -def deserialize_directory_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - directory_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return directory_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_file_stream(response, obj, headers): - file_properties = deserialize_file_properties(response, obj, headers) - obj.properties = file_properties - return response.location_mode, obj - - -def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission - ''' - - return obj.permission - - -def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission key - ''' - - if response is None or headers is None: - return None - return headers.get('x-ms-file-permission-key', None) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_directory_client.py deleted file mode 100644 index d49f976..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_directory_client.py +++ /dev/null @@ -1,692 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import StorageErrorException -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._deserialize import deserialize_directory_properties -from ._file_client import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, DirectoryProperties, ContentSettings - from ._generated.models import HandleItem - - -class ShareDirectoryClient(StorageAccountHostsMixin): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.directory_path = directory_path - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - - @classmethod - def from_directory_url(cls, directory_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> ShareDirectoryClient - """Create a ShareDirectoryClient from a directory url. - - :param str directory_url: - The full URI to the directory. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - try: - if not directory_url.lower().startswith('http'): - directory_url = "https://" + directory_url - except AttributeError: - raise ValueError("Directory URL must be a string.") - parsed_url = urlparse(directory_url.rstrip('/')) - if not parsed_url.path and not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(directory_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - path_snapshot, _ = parse_query(parsed_url.query) - - share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') - share_name = unquote(share_name) - - directory_path = path_dir - snapshot = snapshot or path_snapshot - - return cls( - account_url=account_url, share_name=share_name, directory_path=directory_path, - credential=credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. 
-        """
-        share_name = self.share_name
-        if isinstance(share_name, six.text_type):
-            share_name = share_name.encode('UTF-8')
-        directory_path = ""
-        if self.directory_path:
-            directory_path = "/" + quote(self.directory_path, safe='~')
-        return "{}://{}/{}{}{}".format(
-            self.scheme,
-            hostname,
-            quote(share_name),
-            directory_path,
-            self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            share_name,  # type: str
-            directory_path,  # type: str
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> ShareDirectoryClient
-        """Create ShareDirectoryClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param share_name: The name of the share.
-        :type share_name: str
-        :param str directory_path:
-            The directory path.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A directory client.
-        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs)
-
-    def get_file_client(self, file_name, **kwargs):
-        # type: (str, Any) -> ShareFileClient
-        """Get a client to interact with a specific file.
-
-        The file need not already exist.
-
-        :param file_name:
-            The name of the file.
-        :returns: A File Client.
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-        """
-        if self.directory_path:
-            file_name = self.directory_path.rstrip('/') + "/" + file_name
-
-        _pipeline = Pipeline(
-            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
-            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
-        )
-        return ShareFileClient(
-            self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot,
-            credential=self.credential, _hosts=self._hosts, _configuration=self._config,
-            _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs)
-
-    def get_subdirectory_client(self, directory_name, **kwargs):
-        # type: (str, Any) -> ShareDirectoryClient
-        """Get a client to interact with a specific subdirectory.
-
-        The subdirectory need not already exist.
-
-        :param str directory_name:
-            The name of the subdirectory.
-        :returns: A Directory Client.
-        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_directory.py
-                :start-after: [START get_subdirectory_client]
-                :end-before: [END get_subdirectory_client]
-                :language: python
-                :dedent: 12
-                :caption: Gets the subdirectory client.
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode, **kwargs) - - @distributed_trace - def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 12 - :caption: Creates a directory. - """ - timeout = kwargs.pop('timeout', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 12 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - self._client.directory.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], **Any) -> ItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 12 - :caption: List directories and files. 
- """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> ItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - while try_close: - try: - response = self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - } - - @distributed_trace - def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace - def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. 
- :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_subdirectory( - self, directory_name, # type: str - **kwargs): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 12 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace - def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 12 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace - def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. 
- :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 12 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace - def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 12 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_download.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_download.py deleted file mode 100644 index 8a86027..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_download.py +++ /dev/null @@ -1,522 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - 
        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
-        length = chunk_end - chunk_start
-        if length > 0:
-            self._write_to_stream(chunk_data, chunk_start)
-            self._update_progress(length)
-
-    def yield_chunk(self, chunk_start):
-        chunk_start, chunk_end = self._calculate_range(chunk_start)
-        return self._download_chunk(chunk_start, chunk_end - 1)
-
-    def _update_progress(self, length):
-        if self.progress_lock:
-            with self.progress_lock:  # pylint: disable=not-context-manager
-                self.progress_total += length
-        else:
-            self.progress_total += length
-
-    def _write_to_stream(self, chunk_data, chunk_start):
-        if self.stream_lock:
-            with self.stream_lock:  # pylint: disable=not-context-manager
-                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
-                self.stream.write(chunk_data)
-        else:
-            self.stream.write(chunk_data)
-
-    def _download_chunk(self, chunk_start, chunk_end):
-        download_range, offset = process_range_and_offset(
-            chunk_start, chunk_end, chunk_end, self.encryption_options
-        )
-        range_header, range_validation = validate_and_format_range_headers(
-            download_range[0], download_range[1], check_content_md5=self.validate_content
-        )
-
-        try:
-            _, response = self.client.download(
-                range=range_header,
-                range_get_content_md5=range_validation,
-                validate_content=self.validate_content,
-                data_stream_total=self.total_size,
-                download_stream_current=self.progress_total,
-                **self.request_options
-            )
-        except HttpResponseError as error:
-            process_storage_error(error)
-
-        chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
-        return chunk_data
-
-
-class _ChunkIterator(object):
-    """Iterator for chunks in the file download stream."""
-
-    def __init__(self, size, content, downloader):
-        self.size = size
-        self._current_content = content
-        self._iter_downloader = downloader
-        self._iter_chunks = None
-        self._complete = (size == 0)
-
-    def __len__(self):
-        return self.size
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        """Iterate through responses."""
-        if self._complete:
-            raise StopIteration("Download complete")
-        if not self._iter_downloader:
-            # If no iterator was supplied, the download completed with
-            # the initial GET, so we just return that data
-            self._complete = True
-            return self._current_content
-
-        if not self._iter_chunks:
-            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
-        else:
-            chunk = next(self._iter_chunks)
-            self._current_content = self._iter_downloader.yield_chunk(chunk)
-
-        return self._current_content
-
-    next = __next__  # Python 2 compatibility.
-
-
-class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
-    """A streaming object to download from Azure Storage.
-
-    :ivar str name:
-        The name of the file being downloaded.
-    :ivar str path:
-        The full path of the file.
-    :ivar str share:
-        The name of the share where the file is.
-    :ivar ~azure.storage.fileshare.FileProperties properties:
-        The properties of the file being downloaded. If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the file.
- """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size == self.size: - self._download_complete = True - return response - - def chunks(self): - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. 
- :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_file_client.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_file_client.py deleted file mode 100644 index 615ec2e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_file_client.py +++ /dev/null @@ -1,1078 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines -import functools -import time -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Optional, Union, IO, List, Dict, Any, Iterable, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import StorageErrorException, FileHTTPHeaders -from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, get_length -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._deserialize import deserialize_file_properties, deserialize_file_stream -from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import -from ._download import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, ContentSettings, FileProperties, Handle - from ._generated.models import HandleItem - - -def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = client.create_file( - size, - content_settings=content_settings, - metadata=metadata, - timeout=timeout, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - **kwargs - ) - if size == 0: - return response - - responses = upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except StorageErrorException as error: - process_storage_error(error) - - -class ShareFileClient(StorageAccountHostsMixin): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. 
This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not (share_name and file_path): - raise ValueError("Please specify a share name and file name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.file_path = file_path.split('/') - self.file_name = self.file_path[-1] - self.directory_path = "/".join(self.file_path[:-1]) - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - - @classmethod - def from_file_url( - cls, file_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """A client to interact with a specific file, although that file may not yet exist. - - :param str file_url: The full URI to the file. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - try: - if not file_url.lower().startswith('http'): - file_url = "https://" + file_url - except AttributeError: - raise ValueError("File URL must be a string.") - parsed_url = urlparse(file_url.rstrip('/')) - - if not (parsed_url.netloc and parsed_url.path): - raise ValueError("Invalid URL: {}".format(file_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" 
+ parsed_url.query - - path_share, _, path_file = parsed_url.path.lstrip('/').partition('/') - path_snapshot, _ = parse_query(parsed_url.query) - snapshot = snapshot or path_snapshot - share_name = unquote(path_share) - file_path = '/'.join([unquote(p) for p in path_file.split('/')]) - return cls(account_url, share_name, file_path, snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - "/".join([quote(p, safe='~') for p in self.file_path]), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Create ShareFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str file_path: - The file path. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START create_file_client] - :end-before: [END create_file_client] - :language: python - :dedent: 12 - :caption: Creates the file client with connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs) - - @distributed_trace - def create_file( # type: ignore - self, size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. 
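A minimal usage sketch, for illustration only: the connection string, share name and file path below are placeholders, and the import path assumes the ``v2019_07_07`` sub-package of this library, whose ``ShareFileClient`` surface matches what is shown here::

    from azure.multiapi.storagev2.fileshare.v2019_07_07 import ShareFileClient

    # Placeholder values; substitute a real connection string, share and path.
    file_client = ShareFileClient.from_connection_string(
        conn_str="<connection-string>",
        share_name="myshare",
        file_path="mydir/myfile.txt")

    # create_file only allocates the file; content is written afterwards
    # with upload_file or upload_range.
    created = file_client.create_file(size=1024)
    print(created.get("etag"), created.get("last_modified"))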
- :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 12 - :caption: Create a file. - """ - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. 
- If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 12 - :caption: Upload a file. 
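As a rough sketch of the single-call upload path (reusing the ``file_client`` from the earlier sketch; the payload is a placeholder)::

    # file_client: ShareFileClient, constructed as in the earlier sketch.
    data = b"hello, file share"
    result = file_client.upload_file(data)   # length is inferred from the bytes
    print(result.get("etag"))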
- """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs) - - @distributed_trace - def start_copy_from_url( - self, source_url, # type: str - **kwargs # type: Any - ): - # type: (...) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 12 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - - try: - return self._client.file.start_copy( - source_url, - timeout=timeout, - metadata=metadata, - headers=headers, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - try: - self._client.file.abort_copy(copy_id=copy_id, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def download_file( - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) 
-> Iterable[bytes] - """Downloads a file to a stream with automatic chunking. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A iterable data generator (stream) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 12 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - return StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - cls=deserialize_file_stream, - **kwargs) - - @distributed_trace - def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 12 - :caption: Delete a file. - """ - timeout = kwargs.pop('timeout', None) - try: - self._client.file.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
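A short sketch of reading properties back; ``name``, ``share``, ``path`` and ``snapshot`` are attached to the returned object by the client code below::

    # file_client: ShareFileClient, constructed as in the earlier sketch.
    props = file_client.get_file_properties()
    print(props.name, props.share, props.path, props.snapshot)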
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - timeout = kwargs.pop('timeout', None) - try: - file_props = self._client.file.get_properties( - sharesnapshot=self.snapshot, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = '/'.join(self.file_path) - return file_props # type: ignore - - @distributed_trace - def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
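For example, assuming ``ContentSettings`` is exported from the same package as the client (as in the upstream ``azure-storage-file-share`` SDK), the HTTP headers could be reset like this::

    from azure.multiapi.storagev2.fileshare.v2019_07_07 import ContentSettings

    # file_client: ShareFileClient, constructed as in the earlier sketch.
    settings = ContentSettings(content_type="text/plain", cache_control="no-cache")
    file_client.set_http_headers(content_settings=settings)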
- :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop('size', None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_file_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.file.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - metadata=metadata, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def upload_range( # type: ignore - self, data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). 
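A hedged sketch of writing a single range and then listing the valid ranges (the 1024-byte file allocated in the earlier ``create_file`` sketch is assumed)::

    # file_client: ShareFileClient, constructed as in the earlier sketch.
    chunk = b"x" * 512
    file_client.upload_range(chunk, offset=0, length=len(chunk))
    print(file_client.get_ranges())   # e.g. [{'start': 0, 'end': 511}]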
- :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - try: - return self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @staticmethod - def _upload_range_from_url_options(source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - - if offset is None: - raise ValueError("offset must be provided.") - if length is None: - raise ValueError("length must be provided.") - if source_offset is None: - raise ValueError("source_offset must be provided.") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) - - options = { - 'copy_source': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.file.upload_range_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> List[Dict[str, int]] - """Returns the list of valid ranges of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A list of valid ranges. - :rtype: List[dict[str, int]] - """ - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - content_range = None - if offset is not None: - if length is not None: - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - else: - content_range = 'bytes={0}-'.format(offset) - try: - ranges = self._client.file.get_range_list( - sharesnapshot=self.snapshot, - timeout=timeout, - range=content_range, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return [{'start': b.start, 'end': b.end} for b in ranges] - - @distributed_trace - def clear_range( # type: ignore - self, offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - try: - return self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - file_range_write="clear", - range=content_range, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - cls=return_response_headers, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> ItemPaged[Handle] - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
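A brief sketch of enumerating open handles and then force-closing them (``closed_handles_count`` comes from the response dict assembled in the code below)::

    # file_client: ShareFileClient, constructed as in the earlier sketch.
    for handle in file_client.list_handles():
        print(handle.id)

    summary = file_client.close_all_handles()
    print(summary["closed_handles_count"])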
- :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - while try_close: - try: - response = self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - } diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/__init__.py deleted file mode 100644 index 22b5762..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] - -from .version import VERSION - -__version__ = VERSION - diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/_azure_file_storage.py deleted file mode 100644 index 1c7c8d7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/_azure_file_storage.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import PipelineClient -from msrest import Serializer, Deserializer - -from ._configuration import AzureFileStorageConfiguration -from azure.core.exceptions import map_error -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from . import models - - -class AzureFileStorage(object): - """AzureFileStorage - - - :ivar service: Service operations - :vartype service: azure.storage.fileshare.operations.ServiceOperations - :ivar share: Share operations - :vartype share: azure.storage.fileshare.operations.ShareOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.fileshare.operations.DirectoryOperations - :ivar file: File operations - :vartype file: azure.storage.fileshare.operations.FileOperations - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. 
- :type url: str - """ - - def __init__(self, version, url, **kwargs): - - base_url = '{url}' - self._config = AzureFileStorageConfiguration(version, url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-02-02' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - self._client.close() - def __enter__(self): - self._client.__enter__() - return self - def __exit__(self, *exc_details): - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/_configuration.py deleted file mode 100644 index 4f5501d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/_configuration.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from .version import VERSION - - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage - Note that all parameters used to create this instance are saved as instance - attributes. - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. 
- :type url: str - """ - - def __init__(self, version, url, **kwargs): - - if version is None: - raise ValueError("Parameter 'version' must not be None.") - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) - self.generate_client_request_id = True - - self.version = version - self.url = url - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/__init__.py deleted file mode 100644 index 942d3c5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._azure_file_storage_async import AzureFileStorage -__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/_azure_file_storage_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/_azure_file_storage_async.py deleted file mode 100644 index 74cfd2d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/_azure_file_storage_async.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import AsyncPipelineClient -from msrest import Serializer, Deserializer - -from ._configuration_async import AzureFileStorageConfiguration -from azure.core.exceptions import map_error -from .operations_async import ServiceOperations -from .operations_async import ShareOperations -from .operations_async import DirectoryOperations -from .operations_async import FileOperations -from .. 
import models - - -class AzureFileStorage(object): - """AzureFileStorage - - - :ivar service: Service operations - :vartype service: azure.storage.fileshare.aio.operations_async.ServiceOperations - :ivar share: Share operations - :vartype share: azure.storage.fileshare.aio.operations_async.ShareOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.fileshare.aio.operations_async.DirectoryOperations - :ivar file: File operations - :vartype file: azure.storage.fileshare.aio.operations_async.FileOperations - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - """ - - def __init__( - self, version, url, **kwargs): - - base_url = '{url}' - self._config = AzureFileStorageConfiguration(version, url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-02-02' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self): - await self._client.close() - async def __aenter__(self): - await self._client.__aenter__() - return self - async def __aexit__(self, *exc_details): - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/_configuration_async.py deleted file mode 100644 index bdb635e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/_configuration_async.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from ..version import VERSION - - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage - Note that all parameters used to create this instance are saved as instance - attributes. - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. 
- :type url: str - """ - - def __init__(self, version, url, **kwargs): - - if version is None: - raise ValueError("Parameter 'version' must not be None.") - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) - self.generate_client_request_id = True - self.accept_language = None - - self.version = version - self.url = url - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/__init__.py deleted file mode 100644 index 601c709..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations_async import ServiceOperations -from ._share_operations_async import ShareOperations -from ._directory_operations_async import DirectoryOperations -from ._file_operations_async import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_directory_operations_async.py deleted file mode 100644 index a7b1e20..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_directory_operations_async.py +++ /dev/null @@ -1,671 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class DirectoryOperations: - """DirectoryOperations async operations. 
- - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "directory". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "directory" - - async def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): - """Creates a new directory under the specified share or parent directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata 
= {'url': '/{shareName}/{directory}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): - """Returns all system properties for the specified directory, and can also - be used to check the existence of a directory. The data returned does - not include the files in the directory or any subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return 
cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}'} - - async def delete(self, timeout=None, *, cls=None, **kwargs): - """Removes the specified empty directory. Note that the directory must be - empty before it can be deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}'} - - async def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): - """Sets properties on the directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. 
- :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{shareName}/{directory}'} - - async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}'} - - async def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, *, cls=None, **kwargs): - """Returns a list of files or directories under the specified share or - directory. It lists the contents only for a single level of the - directory hierarchy. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListFilesAndDirectoriesSegmentResponse or the result of - cls(response) - :rtype: - ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return 
deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} - - async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs): - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', 
response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} - - async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs): - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterix (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, 
self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_file_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_file_operations_async.py deleted file mode 100644 index 23ec7b2..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_file_operations_async.py +++ /dev/null @@ -1,1199 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class FileOperations: - """FileOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file". - :ivar x_ms_write: Only update is supported: - Update: Writes the bytes downloaded from the source url into the specified range. Constant value: "update". - :ivar x_ms_copy_action: . Constant value: "abort". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_type = "file" - self.x_ms_write = "update" - self.x_ms_copy_action = "abort" - - async def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, *, cls=None, **kwargs): - """Creates a new file or replaces a file. Note it only initializes the - file with no content. - - :param file_content_length: Specifies the maximum size for the file, - up to 1 TB. - :type file_content_length: long - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. 
- :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", 
file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def download(self, timeout=None, range=None, range_get_content_md5=None, *, cls=None, **kwargs): - """Reads or downloads a file from the system, including its metadata and - properties. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Return file data only from the specified byte range. 
- :type range: str - :param range_get_content_md5: When this header is set to true and - specified together with the Range header, the service returns the MD5 - hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 
'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': 
self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): - """Returns all user-defined metadata, standard HTTP properties, and system - properties for the file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', 
response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def delete(self, timeout=None, *, cls=None, **kwargs): - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, *, cls=None, **kwargs): - """Sets HTTP headers on the file. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If - the specified byte value is less than the current size of the file, - then all ranges above the specified byte value are cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - 
header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, *, cls=None, **kwargs): - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the - start and end of the range must be specified. For an update operation, - the range can be up to 4 MB in size. For a clear operation, the range - can be up to the value of the file's full size. The File service - accepts only a single byte range for the Range and 'x-ms-range' - headers, and the byte range must be specified in the following format: - bytes=startByte-endByte. - :type range: str - :param file_range_write: Specify one of the following options: - - Update: Writes the bytes specified by the request body into the - specified range. The Range and Content-Length headers must match to - perform the update. - Clear: Clears the specified range and releases - the space used in storage for that range. To clear a range, set the - Content-Length header to zero, and set the Range header to a value - that indicates the range to clear, up to maximum file size. Possible - values include: 'update', 'clear' - :type file_range_write: str or - ~azure.storage.fileshare.models.FileRangeWriteType - :param content_length: Specifies the number of bytes being transmitted - in the request body. 
When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param optionalbody: Initial data. - :type optionalbody: Generator - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param content_md5: An MD5 hash of the content. This hash is used to - verify the integrity of the data during transport. When the - Content-MD5 header is specified, the File service compares the hash of - the content that has arrived with the header value that was sent. If - the two hashes do not match, the operation will fail with error code - 400 (Bad Request). - :type content_md5: bytearray - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "range" - - # Construct URL - url = self.upload_range.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self.x_ms_write", self.x_ms_write, 'FileRangeWriteType') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def upload_range_from_url(self, range, 
copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, *, cls=None, **kwargs): - """Upload a range of bytes to a file where the contents are read from a - URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the - range of bytes that must be read from the copy source. - :type source_content_crc64: bytearray - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - source_if_none_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - - comp = "range" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self.x_ms_write", self.x_ms_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - 
header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') - if source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def get_range_list(self, sharesnapshot=None, timeout=None, range=None, *, cls=None, **kwargs): - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, - inclusively. 
- :type range: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.Range] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "rangelist" - - # Construct URL - url = self.get_range_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[Range]', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def start_copy(self, copy_source, timeout=None, metadata=None, *, cls=None, **kwargs): - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.start_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def abort_copy(self, copy_id, timeout=None, *, cls=None, **kwargs): - """Aborts a pending Copy File operation, and leaves a destination file - with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "copy" - - # Construct URL - url = self.abort_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, *, cls=None, **kwargs): - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, *, cls=None, **kwargs): - """Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterix (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. 
- :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_service_operations_async.py deleted file mode 100644 index c4e40f1..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_service_operations_async.py +++ /dev/null @@ -1,253 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... 
import models - - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "service". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "service" - - async def set_properties(self, storage_service_properties, timeout=None, *, cls=None, **kwargs): - """Sets properties for a storage account's File service endpoint, - including properties for Storage Analytics metrics and CORS - (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - async def get_properties(self, timeout=None, *, cls=None, **kwargs): - """Gets the properties of a storage account's File service, including - properties for Storage Analytics metrics and CORS (Cross-Origin - 
Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, *, cls=None, **kwargs): - """The List Shares Segment operation returns a list of the shares and - share snapshots under the specified account. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. 
- :type include: list[str or - ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListSharesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_shares_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListSharesResponse', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_shares_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_share_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_share_operations_async.py deleted file mode 100644 index 836729d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/aio/operations_async/_share_operations_async.py +++ /dev/null @@ -1,747 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ShareOperations: - """ShareOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "share". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "share" - - async def create(self, timeout=None, metadata=None, quota=None, *, cls=None, **kwargs): - """Creates a new share under the specified account. If the share with the - same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': 
self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): - """Returns all user-defined metadata and system properties for the - specified share or share snapshot. The data returned does not include - the share's list of files. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}'} - - async def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, *, cls=None, **kwargs): - """Operation marks the specified share or share snapshot for deletion. The - share or share snapshot and any files contained within it are later - deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. 
For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the - base share and all of its snapshots. Possible values include: - 'include' - :type delete_snapshots: str or - ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}'} - - async def create_snapshot(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{shareName}'} - - async def create_permission(self, share_permission, timeout=None, *, cls=None, **kwargs): - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the - share level. - :type share_permission: - ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.create_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_permission.metadata = {'url': '/{shareName}'} - - async def get_permission(self, file_permission_key, timeout=None, *, cls=None, **kwargs): - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the - directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: SharePermission or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.get_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SharePermission', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} - - async def set_quota(self, timeout=None, quota=None, *, cls=None, **kwargs): - """Sets quota for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. 
- :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_quota.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_quota.metadata = {'url': '/{shareName}'} - - async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}'} - - async def get_access_policy(self, timeout=None, *, cls=None, **kwargs): - """Returns information about stored access policies specified on the - share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} - - async def set_access_policy(self, share_acl=None, timeout=None, *, cls=None, **kwargs): - """Sets a stored access policy for use with shared access signatures. - - :param share_acl: The ACL for the share. - :type share_acl: - list[~azure.storage.fileshare.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{shareName}'} - - async def get_statistics(self, timeout=None, *, cls=None, **kwargs): - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ShareStats or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ShareStats', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/__init__.py deleted file mode 100644 index 6baaf79..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/__init__.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import CorsRule - from ._models_py3 import DirectoryItem - from ._models_py3 import FileHTTPHeaders - from ._models_py3 import FileItem - from ._models_py3 import FileProperty - from ._models_py3 import FilesAndDirectoriesListSegment - from ._models_py3 import HandleItem - from ._models_py3 import ListFilesAndDirectoriesSegmentResponse - from ._models_py3 import ListHandlesResponse - from ._models_py3 import ListSharesResponse - from ._models_py3 import Metrics - from ._models_py3 import Range - from ._models_py3 import RetentionPolicy - from ._models_py3 import ShareItem - from ._models_py3 import SharePermission - from ._models_py3 import ShareProperties - from ._models_py3 import ShareStats - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError, StorageErrorException - from ._models_py3 import StorageServiceProperties -except (SyntaxError, ImportError): - from ._models import AccessPolicy - from ._models import CorsRule - from ._models import DirectoryItem - from ._models import FileHTTPHeaders - from ._models import FileItem - from ._models import FileProperty - from ._models import FilesAndDirectoriesListSegment - from ._models import HandleItem - from ._models import ListFilesAndDirectoriesSegmentResponse - from ._models import ListHandlesResponse - from ._models import ListSharesResponse - from ._models import Metrics - from ._models import Range - from ._models import RetentionPolicy - from ._models import ShareItem - from ._models import SharePermission - from ._models import ShareProperties - from ._models import ShareStats - from ._models import SignedIdentifier - from ._models import SourceModifiedAccessConditions - from ._models import StorageError, StorageErrorException - from ._models import StorageServiceProperties -from ._azure_file_storage_enums import ( - CopyStatusType, - DeleteSnapshotsOptionType, - FileRangeWriteType, - ListSharesIncludeType, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'CorsRule', - 'DirectoryItem', - 'FileHTTPHeaders', - 'FileItem', - 'FileProperty', - 'FilesAndDirectoriesListSegment', - 'HandleItem', - 'ListFilesAndDirectoriesSegmentResponse', - 'ListHandlesResponse', - 'ListSharesResponse', - 'Metrics', - 'Range', - 'RetentionPolicy', - 'ShareItem', - 'SharePermission', - 'ShareProperties', - 'ShareStats', - 'SignedIdentifier', - 'SourceModifiedAccessConditions', - 'StorageError', 'StorageErrorException', - 'StorageServiceProperties', - 'StorageErrorCode', - 'DeleteSnapshotsOptionType', - 'ListSharesIncludeType', - 'CopyStatusType', - 'FileRangeWriteType', -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_models.py deleted file mode 100644 index 5f684db..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_models.py +++ /dev/null @@ -1,794 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. 
- :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs.get('allowed_origins', None) - self.allowed_methods = kwargs.get('allowed_methods', None) - self.allowed_headers = kwargs.get('allowed_headers', None) - self.exposed_headers = kwargs.get('exposed_headers', None) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) - - -class DirectoryItem(Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__(self, **kwargs): - super(DirectoryItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - - -class FileHTTPHeaders(Model): - """Additional parameters for a set of operations, such as: File_create, - File_set_http_headers. - - :param file_content_type: Sets the MIME content type of the file. The - default type is 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been - applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this - resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service - stores this value but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition - header. 
- :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, - 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, - 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, - 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, - 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, - 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = kwargs.get('file_content_type', None) - self.file_content_encoding = kwargs.get('file_content_encoding', None) - self.file_content_language = kwargs.get('file_content_language', None) - self.file_cache_control = kwargs.get('file_cache_control', None) - self.file_content_md5 = kwargs.get('file_content_md5', None) - self.file_content_disposition = kwargs.get('file_content_disposition', None) - - -class FileItem(Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.FileProperty - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, - } - _xml_map = { - 'name': 'File' - } - - def __init__(self, **kwargs): - super(FileItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.properties = kwargs.get('properties', None) - - -class FileProperty(Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value - may not be up-to-date since an SMB client may have modified the file - locally. The value of Content-Length may not reflect that fact until the - handle is closed or the op-lock is broken. To retrieve current property - values, call Get File Properties. - :type content_length: long - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(FileProperty, self).__init__(**kwargs) - self.content_length = kwargs.get('content_length', None) - - -class FilesAndDirectoriesListSegment(Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. 
- :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__(self, **kwargs): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = kwargs.get('directory_items', None) - self.file_items = kwargs.get('file_items', None) - - -class HandleItem(Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID - :type handle_id: str - :param path: Required. File or directory name including full path starting - from share root - :type path: str - :param file_id: Required. FileId uniquely identifies the file or - directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the - object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file - handle was opened - :type session_id: str - :param client_ip: Required. Client IP that opened the handle - :type client_ip: str - :param open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :type open_time: datetime - :param last_reconnect_time: Time handle was last connected to (UTC) - :type last_reconnect_time: datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, - 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, - 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, - 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, - 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, - 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__(self, **kwargs): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = kwargs.get('handle_id', None) - self.path = kwargs.get('path', None) - self.file_id = kwargs.get('file_id', None) - self.parent_id = kwargs.get('parent_id', None) - self.session_id = kwargs.get('session_id', None) - self.client_ip = kwargs.get('client_ip', None) - self.open_time = kwargs.get('open_time', None) - self.last_reconnect_time = kwargs.get('last_reconnect_time', None) - - -class ListFilesAndDirectoriesSegmentResponse(Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. 
- :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: - ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.share_name = kwargs.get('share_name', None) - self.share_snapshot = kwargs.get('share_snapshot', None) - self.directory_path = kwargs.get('directory_path', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListHandlesResponse(Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = kwargs.get('handle_list', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListSharesResponse(Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItem] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.share_items = kwargs.get('share_items', None) - self.next_marker = kwargs.get('next_marker', None) - - -class Metrics(Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs.get('enabled', None) - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class Range(Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__(self, **kwargs): - super(Range, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class RetentionPolicy(Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the File service. 
If false, metrics data is retained, and the user is - responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be - retained. All data older than this value will be deleted. Metrics data is - deleted on a best-effort basis after the retention period expires. - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.days = kwargs.get('days', None) - - -class ShareItem(Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.ShareProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__(self, **kwargs): - super(ShareItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.snapshot = kwargs.get('snapshot', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - - -class SharePermission(Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor - Definition Language (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SharePermission, self).__init__(**kwargs) - self.permission = kwargs.get('permission', None) - - -class ShareProperties(Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param quota: Required. - :type quota: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareProperties, self).__init__(**kwargs) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.quota = kwargs.get('quota', None) - - -class ShareStats(Model): - """Stats for the share. 
- - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data - stored in bytes. Note that this value may not include all recently created - or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = kwargs.get('share_usage_bytes', None) - - -class SignedIdentifier(Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for upload_range_from_url operation. - - :param source_if_match_crc64: Specify the crc64 value to operate only on - range with a matching crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only - on range without a matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}}, - 'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None) - self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None) - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in - hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in - minute aggregates for files. 
- :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.fileshare.models.CorsRule] - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_models_py3.py deleted file mode 100644 index 67681a2..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_models_py3.py +++ /dev/null @@ -1,794 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None: - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. 
(comma separated) - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class DirectoryItem(Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__(self, *, name: str, **kwargs) -> None: - super(DirectoryItem, self).__init__(**kwargs) - self.name = name - - -class FileHTTPHeaders(Model): - """Additional parameters for a set of operations, such as: File_create, - File_set_http_headers. - - :param file_content_type: Sets the MIME content type of the file. The - default type is 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been - applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this - resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service - stores this value but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition - header. 
- :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, - 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, - 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, - 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, - 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, - 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, file_content_type: str=None, file_content_encoding: str=None, file_content_language: str=None, file_cache_control: str=None, file_content_md5: bytearray=None, file_content_disposition: str=None, **kwargs) -> None: - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = file_content_type - self.file_content_encoding = file_content_encoding - self.file_content_language = file_content_language - self.file_cache_control = file_cache_control - self.file_content_md5 = file_content_md5 - self.file_content_disposition = file_content_disposition - - -class FileItem(Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.FileProperty - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, - } - _xml_map = { - 'name': 'File' - } - - def __init__(self, *, name: str, properties, **kwargs) -> None: - super(FileItem, self).__init__(**kwargs) - self.name = name - self.properties = properties - - -class FileProperty(Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value - may not be up-to-date since an SMB client may have modified the file - locally. The value of Content-Length may not reflect that fact until the - handle is closed or the op-lock is broken. To retrieve current property - values, call Get File Properties. - :type content_length: long - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - } - _xml_map = { - } - - def __init__(self, *, content_length: int, **kwargs) -> None: - super(FileProperty, self).__init__(**kwargs) - self.content_length = content_length - - -class FilesAndDirectoriesListSegment(Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. 
- :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__(self, *, directory_items, file_items, **kwargs) -> None: - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = directory_items - self.file_items = file_items - - -class HandleItem(Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID - :type handle_id: str - :param path: Required. File or directory name including full path starting - from share root - :type path: str - :param file_id: Required. FileId uniquely identifies the file or - directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the - object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file - handle was opened - :type session_id: str - :param client_ip: Required. Client IP that opened the handle - :type client_ip: str - :param open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :type open_time: datetime - :param last_reconnect_time: Time handle was last connected to (UTC) - :type last_reconnect_time: datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, - 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, - 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, - 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, - 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, - 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__(self, *, handle_id: str, path: str, file_id: str, session_id: str, client_ip: str, open_time, parent_id: str=None, last_reconnect_time=None, **kwargs) -> None: - super(HandleItem, self).__init__(**kwargs) - self.handle_id = handle_id - self.path = path - self.file_id = file_id - self.parent_id = parent_id - self.session_id = session_id - self.client_ip = client_ip - self.open_time = open_time - self.last_reconnect_time = last_reconnect_time - - -class ListFilesAndDirectoriesSegmentResponse(Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. 
- :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: - ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, share_name: str, directory_path: str, prefix: str, segment, next_marker: str, share_snapshot: str=None, marker: str=None, max_results: int=None, **kwargs) -> None: - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.share_name = share_name - self.share_snapshot = share_snapshot - self.directory_path = directory_path - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListHandlesResponse(Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, next_marker: str, handle_list=None, **kwargs) -> None: - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = handle_list - self.next_marker = next_marker - - -class ListSharesResponse(Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItem] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, next_marker: str, prefix: str=None, marker: str=None, max_results: int=None, share_items=None, **kwargs) -> None: - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.share_items = share_items - self.next_marker = next_marker - - -class Metrics(Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, version: str, enabled: bool, include_apis: bool=None, retention_policy=None, **kwargs) -> None: - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class Range(Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(Range, self).__init__(**kwargs) - self.start = start - self.end = end - - -class RetentionPolicy(Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the File service. 
If false, metrics data is retained, and the user is - responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be - retained. All data older than this value will be deleted. Metrics data is - deleted on a best-effort basis after the retention period expires. - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class ShareItem(Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.ShareProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__(self, *, name: str, properties, snapshot: str=None, metadata=None, **kwargs) -> None: - super(ShareItem, self).__init__(**kwargs) - self.name = name - self.snapshot = snapshot - self.properties = properties - self.metadata = metadata - - -class SharePermission(Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor - Definition Language (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, - } - _xml_map = { - } - - def __init__(self, *, permission: str, **kwargs) -> None: - super(SharePermission, self).__init__(**kwargs) - self.permission = permission - - -class ShareProperties(Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param quota: Required. - :type quota: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, - } - _xml_map = { - } - - def __init__(self, *, last_modified, etag: str, quota: int, **kwargs) -> None: - super(ShareProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.quota = quota - - -class ShareStats(Model): - """Stats for the share. 
- - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data - stored in bytes. Note that this value may not include all recently created - or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, - } - _xml_map = { - } - - def __init__(self, *, share_usage_bytes: int, **kwargs) -> None: - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = share_usage_bytes - - -class SignedIdentifier(Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for upload_range_from_url operation. - - :param source_if_match_crc64: Specify the crc64 value to operate only on - range with a matching crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only - on range without a matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}}, - 'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}}, - } - _xml_map = { - } - - def __init__(self, *, source_if_match_crc64: bytearray=None, source_if_none_match_crc64: bytearray=None, **kwargs) -> None: - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = source_if_match_crc64 - self.source_if_none_match_crc64 = source_if_none_match_crc64 - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, *, message: str=None, **kwargs) -> None: - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in - hourly aggregates for files. 
- :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in - minute aggregates for files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.fileshare.models.CorsRule] - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, *, hour_metrics=None, minute_metrics=None, cors=None, **kwargs) -> None: - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/__init__.py deleted file mode 100644 index 65680c9..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_directory_operations.py deleted file mode 100644 index 6b8866b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,671 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "directory". 
- """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "directory" - - def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs): - """Creates a new directory under the specified share or parent directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = 
{'url': '/{shareName}/{directory}'} - - def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs): - """Returns all system properties for the specified directory, and can also - be used to check the existence of a directory. The data returned does - not include the files in the directory or any subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, 
None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}'} - - def delete(self, timeout=None, cls=None, **kwargs): - """Removes the specified empty directory. Note that the directory must be - empty before it can be deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}'} - - def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs): - """Sets properties on the directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. 
- :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{shareName}/{directory}'} - - def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs): - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}'} - - def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, cls=None, **kwargs): - """Returns a list of files or directories under the specified share or - directory. It lists the contents only for a single level of the - directory hierarchy. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListFilesAndDirectoriesSegmentResponse or the result of - cls(response) - :rtype: - ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return 
deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} - - def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, cls=None, **kwargs): - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} - - def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, cls=None, **kwargs): - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterix (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_file_operations.py deleted file mode 100644 index c8e5532..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_file_operations.py +++ /dev/null @@ -1,1198 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class FileOperations(object): - """FileOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file". - :ivar x_ms_write: Only update is supported: - Update: Writes the bytes downloaded from the source url into the specified range. Constant value: "update". - :ivar x_ms_copy_action: . Constant value: "abort". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_type = "file" - self.x_ms_write = "update" - self.x_ms_copy_action = "abort" - - def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, cls=None, **kwargs): - """Creates a new file or replaces a file. Note it only initializes the - file with no content. - - :param file_content_length: Specifies the maximum size for the file, - up to 1 TB. - :type file_content_length: long - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. 
- :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", 
file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def download(self, timeout=None, range=None, range_get_content_md5=None, cls=None, **kwargs): - """Reads or downloads a file from the system, including its metadata and - properties. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Return file data only from the specified byte range. 
- :type range: str - :param range_get_content_md5: When this header is set to true and - specified together with the Range header, the service returns the MD5 - hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', 
response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', 
response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs): - """Returns all user-defined metadata, standard HTTP properties, and system - properties for the file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': 
self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def delete(self, timeout=None, cls=None, **kwargs): - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
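Every operation in this generated group accepts an optional ``cls`` callable that receives the raw HTTP response, the deserialized body (``None`` for header-only calls such as ``get_properties``), and the parsed header dict. A minimal sketch of that hook, assuming ``file_ops`` is an already-constructed instance of this generated FileOperations class (client wiring is not shown in this hunk)::

    def return_headers(response, deserialized, headers):
        # The third argument is the header dict deserialized above; hand it
        # straight back so the caller can inspect the service metadata.
        return headers

    props = file_ops.get_properties(timeout=30, cls=return_headers)
    print(props['x-ms-type'], props['Content-Length'], props['ETag'])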
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, cls=None, **kwargs): - """Sets HTTP headers on the file. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If - the specified byte value is less than the current size of the file, - then all ranges above the specified byte value are cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - 
header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs): - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
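The ``file_content_length`` parameter documented for ``set_http_headers`` above doubles as a resize. A hedged sketch of truncating an existing file, assuming the same ``file_ops`` instance as before; attribute, timestamp and permission arguments are left at their documented defaults (``"none"``, ``"now"``, ``"inherit"``)::

    # Hypothetical: shrink the file to exactly 1 MiB; the service clears any
    # ranges above the new length.
    file_ops.set_http_headers(file_content_length=1024 * 1024, timeout=30)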
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, cls=None, **kwargs): - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the - start and end of the range must be specified. For an update operation, - the range can be up to 4 MB in size. For a clear operation, the range - can be up to the value of the file's full size. The File service - accepts only a single byte range for the Range and 'x-ms-range' - headers, and the byte range must be specified in the following format: - bytes=startByte-endByte. - :type range: str - :param file_range_write: Specify one of the following options: - - Update: Writes the bytes specified by the request body into the - specified range. The Range and Content-Length headers must match to - perform the update. - Clear: Clears the specified range and releases - the space used in storage for that range. To clear a range, set the - Content-Length header to zero, and set the Range header to a value - that indicates the range to clear, up to maximum file size. Possible - values include: 'update', 'clear' - :type file_range_write: str or - ~azure.storage.fileshare.models.FileRangeWriteType - :param content_length: Specifies the number of bytes being transmitted - in the request body. 
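A hedged sketch of the update/clear contract spelled out above for ``upload_range`` (the remaining parameters are documented below), again assuming a configured ``file_ops`` instance: for ``update`` the byte range and ``Content-Length`` must agree, and for ``clear`` the length must be zero::

    # Hypothetical: write 512 bytes at offset 0, then clear the same range.
    payload = b"\x00" * 512
    file_ops.upload_range("bytes=0-511", content_length=512,
                          file_range_write="update", optionalbody=iter([payload]))
    file_ops.upload_range("bytes=0-511", content_length=0,
                          file_range_write="clear")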
When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param optionalbody: Initial data. - :type optionalbody: Generator - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param content_md5: An MD5 hash of the content. This hash is used to - verify the integrity of the data during transport. When the - Content-MD5 header is specified, the File service compares the hash of - the content that has arrived with the header value that was sent. If - the two hashes do not match, the operation will fail with error code - 400 (Bad Request). - :type content_md5: bytearray - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "range" - - # Construct URL - url = self.upload_range.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self.x_ms_write", self.x_ms_write, 'FileRangeWriteType') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def upload_range_from_url(self, range, copy_source, 
content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, cls=None, **kwargs): - """Upload a range of bytes to a file where the contents are read from a - URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the - range of bytes that must be read from the copy source. - :type source_content_crc64: bytearray - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - source_if_none_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - - comp = "range" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self.x_ms_write", self.x_ms_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = 
self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') - if source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def get_range_list(self, sharesnapshot=None, timeout=None, range=None, cls=None, **kwargs): - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, - inclusively. 
- :type range: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.Range] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "rangelist" - - # Construct URL - url = self.get_range_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[Range]', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def start_copy(self, copy_source, timeout=None, metadata=None, cls=None, **kwargs): - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.start_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def abort_copy(self, copy_id, timeout=None, cls=None, **kwargs): - """Aborts a pending Copy File operation, and leaves a destination file - with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
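A hedged sketch combining ``start_copy`` with ``abort_copy`` (both described above), using the ``cls`` hook to read the copy identifier and status headers; the source URL and SAS token are placeholders::

    def return_headers(response, deserialized, headers):
        return headers

    copy = file_ops.start_copy(
        "https://account.file.core.windows.net/share/dir/source.txt?<sas>",
        cls=return_headers)
    if copy['x-ms-copy-status'] != 'success':
        # Abort with the identifier issued by the original Copy File call,
        # leaving a zero-length destination file as described above.
        file_ops.abort_copy(copy['x-ms-copy-id'])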
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "copy" - - # Construct URL - url = self.abort_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, cls=None, **kwargs): - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, cls=None, **kwargs): - """Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterix (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. 
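The ``marker`` mechanics described above apply to ``force_close_handles`` as well. A sketch that closes every handle on the file and follows the continuation token returned in ``x-ms-marker``, assuming the same ``file_ops`` instance::

    def return_headers(response, deserialized, headers):
        return headers

    marker = None
    while True:
        result = file_ops.force_close_handles("*", marker=marker, cls=return_headers)
        print("closed:", result['x-ms-number-of-handles-closed'])
        marker = result.get('x-ms-marker')
        if not marker:
            break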
- :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_service_operations.py deleted file mode 100644 index cd43e83..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_service_operations.py +++ /dev/null @@ -1,253 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. 
import models - - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "service". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "service" - - def set_properties(self, storage_service_properties, timeout=None, cls=None, **kwargs): - """Sets properties for a storage account's File service endpoint, - including properties for Storage Analytics metrics and CORS - (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - def get_properties(self, timeout=None, cls=None, **kwargs): - """Gets the properties of a storage account's File service, including - properties for Storage Analytics metrics and CORS (Cross-Origin - Resource Sharing) rules. 
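A hedged sketch of ``set_properties`` above, written against the v2019_02_02 package that this patch removes; it assumes the generated ``CorsRule`` and ``StorageServiceProperties`` models take the keyword arguments shown (their definitions are outside this hunk) and that ``service_ops`` is an already-constructed instance of this generated ServiceOperations class::

    # Assumed model constructors; verify against the generated models module.
    from azure.multiapi.storagev2.fileshare.v2019_02_02._generated import models

    rule = models.CorsRule(
        allowed_origins="https://contoso.com",
        allowed_methods="GET,PUT",
        allowed_headers="x-ms-meta-*",
        exposed_headers="x-ms-meta-*",
        max_age_in_seconds=3600)
    service_ops.set_properties(models.StorageServiceProperties(cors=[rule]))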
- - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, cls=None, **kwargs): - """The List Shares Segment operation returns a list of the shares and - share snapshots under the specified account. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. - :type include: list[str or - ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. 
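A sketch of page-by-page enumeration with ``list_shares_segment``, assuming ``ListSharesResponse`` exposes ``share_items`` and ``next_marker`` (the model is defined outside this hunk) and reusing the ``service_ops`` instance from the sketch above::

    # Hypothetical: walk all shares whose names start with "logs",
    # 100 entries per round trip, until the continuation marker runs out.
    marker = None
    while True:
        page = service_ops.list_shares_segment(
            prefix="logs", maxresults=100, marker=marker,
            include=["metadata", "snapshots"])
        for share in page.share_items:
            print(share.name)
        marker = page.next_marker
        if not marker:
            break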
For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListSharesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_shares_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListSharesResponse', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_shares_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_share_operations.py deleted file mode 100644 index 6e27545..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/operations/_share_operations.py +++ /dev/null @@ -1,747 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. 
import models - - -class ShareOperations(object): - """ShareOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "share". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "share" - - def create(self, timeout=None, metadata=None, quota=None, cls=None, **kwargs): - """Creates a new share under the specified account. If the share with the - same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}'} - - def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs): - """Returns all 
user-defined metadata and system properties for the - specified share or share snapshot. The data returned does not include - the share's list of files. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}'} - - def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, cls=None, **kwargs): - """Operation marks the specified share or share snapshot for deletion. The - share or share snapshot and any files contained within it are later - deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the - base share and all of its snapshots. 
Possible values include: - 'include' - :type delete_snapshots: str or - ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}'} - - def create_snapshot(self, timeout=None, metadata=None, cls=None, **kwargs): - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
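As the generated code above shows, each operation ends with `return cls(response, None, response_headers)` (or `cls(response, deserialized, header_dict)` when there is a response body), so the optional `cls` argument is simply a response hook. A minimal sketch of such a hook, with hypothetical field names, that could be passed as `cls=capture_headers` to any of these calls:

    def capture_headers(raw_response, deserialized, headers):
        # Called by the generated operation with the raw HTTP response, the
        # deserialized body (None for header-only operations) and the parsed
        # response headers; its return value becomes the operation's result.
        return {
            "etag": headers.get("ETag"),
            "request_id": headers.get("x-ms-request-id"),
        }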
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{shareName}'} - - def create_permission(self, share_permission, timeout=None, cls=None, **kwargs): - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the - share level. - :type share_permission: - ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
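For callers, the `create_snapshot` operation above is normally reached through the upstream `azure.storage.fileshare.ShareClient` surface that this vendored module derives from; the following sketch assumes that client and uses placeholder account details:

    from azure.storage.fileshare import ShareClient

    share = ShareClient(account_url="https://<account>.file.core.windows.net",
                        share_name="myshare", credential="<account-key>")
    snapshot_info = share.create_snapshot(metadata={"taken-by": "nightly-job"})
    # The x-ms-snapshot header surfaced above is the opaque DateTime value used
    # to address the snapshot later (e.g. via the sharesnapshot query parameter).
    print(snapshot_info.get("snapshot"))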
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.create_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_permission.metadata = {'url': '/{shareName}'} - - def get_permission(self, file_permission_key, timeout=None, cls=None, **kwargs): - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the - directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
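`create_permission` and `get_permission` are a pair: the first uploads a security descriptor and returns its key in `x-ms-file-permission-key`, the second resolves a key back to the descriptor. A sketch of the round trip, assuming the upstream `ShareClient` convenience methods and a hypothetical SDDL string:

    from azure.storage.fileshare import ShareClient

    share = ShareClient(account_url="https://<account>.file.core.windows.net",
                        share_name="myshare", credential="<account-key>")
    sddl = "O:SYG:SYD:(A;;FA;;;SY)"                 # illustrative security descriptor (< 8 KiB)
    key = share.create_permission_for_share(sddl)   # key from x-ms-file-permission-key
    descriptor = share.get_permission_for_share(key)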
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: SharePermission or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.get_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SharePermission', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} - - def set_quota(self, timeout=None, quota=None, cls=None, **kwargs): - """Sets quota for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. 
- :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_quota.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_quota.metadata = {'url': '/{shareName}'} - - def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs): - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}'} - - def get_access_policy(self, timeout=None, cls=None, **kwargs): - """Returns information about stored access policies specified on the - share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
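The `set_quota` and `set_metadata` operations above map onto share-level setters in the upstream client; a sketch under that assumption, with placeholder values:

    from azure.storage.fileshare import ShareClient

    share = ShareClient(account_url="https://<account>.file.core.windows.net",
                        share_name="myshare", credential="<account-key>")
    share.set_share_quota(10)                       # x-ms-share-quota, in GiB
    share.set_share_metadata({"team": "storage"})   # sent as x-ms-meta-* headers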
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} - - def set_access_policy(self, share_acl=None, timeout=None, cls=None, **kwargs): - """Sets a stored access policy for use with shared access signatures. - - :param share_acl: The ACL for the share. - :type share_acl: - list[~azure.storage.fileshare.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{shareName}'} - - def get_statistics(self, timeout=None, cls=None, **kwargs): - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
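`get_access_policy` and `set_access_policy` exchange the share's stored access policies (the `SignedIdentifier` list serialized as the XML body above). A sketch using the `AccessPolicy` and `ShareSasPermissions` models defined elsewhere in this package, assuming the upstream client surface and placeholder credentials:

    from datetime import datetime, timedelta
    from azure.storage.fileshare import AccessPolicy, ShareClient, ShareSasPermissions

    share = ShareClient(account_url="https://<account>.file.core.windows.net",
                        share_name="myshare", credential="<account-key>")
    policy = AccessPolicy(permission=ShareSasPermissions(read=True, list=True),
                          expiry=datetime.utcnow() + timedelta(hours=1),
                          start=datetime.utcnow())
    # Each dictionary key becomes a SignedIdentifier id; the value is its policy.
    share.set_share_access_policy(signed_identifiers={"read-only": policy})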
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ShareStats or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ShareStats', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/version.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/version.py deleted file mode 100644 index 9c89a27..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/version.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -VERSION = "2019-02-02" - diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_models.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_models.py deleted file mode 100644 index 5a4b284..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_models.py +++ /dev/null @@ -1,870 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. 
All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.paging import PageIterator -from ._parser import _parse_datetime_from_str -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import StorageErrorException -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import DirectoryItem - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for files. - - All required parameters must be populated in order to send to Azure. - - :keyword str version: The version of Storage Analytics to configure. - :keyword bool enabled: Required. Indicates whether metrics are enabled for the - File service. - :keyword bool include_ap_is: Indicates whether metrics should generate summary - statistics for called API operations. - :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should - persist. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param bool enabled: Required. Indicates whether a retention policy is enabled - for the storage service. - :param int days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. - """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. 
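The `Metrics` and `RetentionPolicy` wrappers above feed the file service properties. A brief sketch of configuring hourly metrics with a five-day retention window, assuming the upstream `ShareServiceClient` and placeholder credentials:

    from azure.storage.fileshare import Metrics, RetentionPolicy, ShareServiceClient

    service = ShareServiceClient(account_url="https://<account>.file.core.windows.net",
                                 credential="<account-key>")
    hourly = Metrics(enabled=True, include_apis=True,
                     retention_policy=RetentionPolicy(enabled=True, days=5))
    service.set_service_properties(hour_metrics=hourly)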
Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.fileshare.FileSasPermissions or - ~azure.storage.fileshare.ShareSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class ContentSettings(DictMixin): - """Used to store the content settings of a file. - - :param str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :param str content_language: - If the content_language has previously been set - for the file, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :param str content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class ShareProperties(DictMixin): - """Share's properties class. - - :ivar str name: - The name of the share. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the share was modified. 
- :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int quota: - The allocated quota. - :ivar dict metadata: A dict with name_value pairs to associate with the - share as metadata. - :ivar str snapshot: - Snapshot of the share. - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.quota = kwargs.get('x-ms-share-quota') - self.metadata = kwargs.get('metadata') - self.snapshot = None - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.quota = generated.properties.quota - props.metadata = generated.metadata - props.snapshot = generated.snapshot - return props - - -class SharePropertiesPaged(PageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - prefix=self.prefix, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class Handle(DictMixin): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :keyword str handle_id: Required. XSMB service handle ID - :keyword str path: Required. File or directory name including full path starting - from share root - :keyword str file_id: Required. 
FileId uniquely identifies the file or - directory. - :keyword str parent_id: ParentId uniquely identifies the parent directory of the - object. - :keyword str session_id: Required. SMB session ID in context of which the file - handle was opened - :keyword str client_ip: Required. Client IP that opened the handle - :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('handle_id') - self.path = kwargs.get('path') - self.file_id = kwargs.get('file_id') - self.parent_id = kwargs.get('parent_id') - self.session_id = kwargs.get('session_id') - self.client_ip = kwargs.get('client_ip') - self.open_time = kwargs.get('open_time') - self.last_reconnect_time = kwargs.get('last_reconnect_time') - - @classmethod - def _from_generated(cls, generated): - handle = cls() - handle.id = generated.handle_id - handle.path = generated.path - handle.file_id = generated.file_id - handle.parent_id = generated.parent_id - handle.session_id = generated.session_id - handle.client_ip = generated.client_ip - handle.open_time = generated.open_time - handle.last_reconnect_time = generated.last_reconnect_time - return handle - - -class HandlesPaged(PageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryProperties(DictMixin): - """Directory's properties class. - - :ivar str name: - The name of the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool server_encrypted: - Whether encryption is enabled. 
- :keyword dict metadata: A dict with name_value pairs to associate with the - directory as metadata. - :ivar change_time: Change time for the file. - :vartype change_time: str or ~datetime.datetime - :ivar creation_time: Creation time for the file. - :vartype creation_time: str or ~datetime.datetime - :ivar last_write_time: Last write time for the file. - :vartype last_write_time: str or ~datetime.datetime - :ivar file_attributes: - The file system attributes for files and directories. - :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :ivar permission_key: Key of the permission to be set for the - directory/file. - :vartype permission_key: str - :ivar file_id: Required. FileId uniquely identifies the file or - directory. - :vartype file_id: str - :ivar parent_id: ParentId uniquely identifies the parent directory of the - object. - :vartype parent_id: str - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.metadata = kwargs.get('metadata') - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.server_encrypted = generated.properties.server_encrypted - props.metadata = generated.metadata - return props - - -class DirectoryPropertiesPaged(PageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] - self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) - return self._response.next_marker or None, self.current_page - - -class FileProperties(DictMixin): - """File's properties class. - - :ivar str name: - The name of the file. - :ivar str path: - The path of the file. - :ivar str share: - The name of share. - :ivar str snapshot: - File snapshot. - :ivar int content_length: - Size of file in bytes. - :ivar dict metadata: A dict with name_value pairs to associate with the - file as metadata. - :ivar str file_type: - Type of the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - Size of file in bytes. - :ivar str content_range: - The range of bytes. - :ivar bool server_encrypted: - Whether encryption is enabled. - :ivar copy: - The copy properties. - :vartype copy: ~azure.storage.fileshare.CopyProperties - :ivar content_settings: - The content settings for the file. 
- :vartype content_settings: ~azure.storage.fileshare.ContentSettings - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.path = None - self.share = None - self.snapshot = None - self.content_length = kwargs.get('Content-Length') - self.metadata = kwargs.get('metadata') - self.file_type = kwargs.get('x-ms-type') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.content_length = generated.properties.content_length - props.metadata = generated.properties.metadata - return props - - -class CopyProperties(DictMixin): - """File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation. - :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source file to a destination file. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar datetime destination_snapshot: - Included if the file is incremental copy or incremental copy snapshot, - if x-ms-copy-status is success. 
Snapshot time of the last successful - incremental copy snapshot for this file. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with - generating shared access signature operations. - - :param bool read: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :param bool create: - Create a new file or copy a file to a new file. - :param bool write: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - :param bool delete: - Delete the file. - """ - def __init__(self, read=False, create=False, write=False, delete=False): - self.read = read - self.create = create - self.write = write - self.delete = delete - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - create permissions, you would provide a string "rc". - - :param str permission: The string which dictates the read, create, - write, or delete permissions - :return: A FileSasPermissions object - :rtype: ~azure.storage.fileshare.FileSasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - - parsed = cls(p_read, p_create, p_write, p_delete) - parsed._str = permission # pylint: disable = protected-access - return parsed - - -class ShareSasPermissions(object): - """ShareSasPermissions class to be used to be used with - generating shared access signature and access policy operations. - - :param bool read: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :param bool write: - For any file in the share, create or write content, properties or metadata. - Resize the file. Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. 
- :param bool delete: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :param bool list: - List files and directories in the share. - """ - def __init__(self, read=False, write=False, delete=False, list=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ShareSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, - delete, or list permissions - :return: A ShareSasPermissions object - :rtype: ~azure.storage.fileshare.ShareSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - - parsed = cls(p_read, p_write, p_delete, p_list) - parsed._str = permission # pylint: disable = protected-access - return parsed - -class NTFSAttributes(object): - """ - Valid set of attributes to set for file or directory. - To set attribute for directory, 'Directory' should always be enabled except setting 'None' for directory. - - :ivar bool read_only: - Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE - :ivar bool hidden: - Enable/disable 'Hidden' attribute for DIRECTORY or FILE - :ivar bool system: - Enable/disable 'System' attribute for DIRECTORY or FILE - :ivar bool none: - Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY - :ivar bool directory: - Enable/disable 'Directory' attribute for DIRECTORY - :ivar bool archive: - Enable/disable 'Archive' attribute for DIRECTORY or FILE - :ivar bool temporary: - Enable/disable 'Temporary' attribute for FILE - :ivar bool offline: - Enable/disable 'Offline' attribute for DIRECTORY or FILE - :ivar bool not_content_indexed: - Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE - :ivar bool no_scrub_data: - Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE - """ - def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False, - temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False): - - self.read_only = read_only - self.hidden = hidden - self.system = system - self.none = none - self.directory = directory - self.archive = archive - self.temporary = temporary - self.offline = offline - self.not_content_indexed = not_content_indexed - self.no_scrub_data = no_scrub_data - self._str = (('ReadOnly|' if self.read_only else '') + - ('Hidden|' if self.hidden else '') + - ('System|' if self.system else '') + - ('None|' if self.none else '') + - ('Directory|' if self.directory else '') + - ('Archive|' if self.archive else '') + - ('Temporary|' if self.temporary else '') + - ('Offline|' if self.offline else '') + - ('NotContentIndexed|' if self.not_content_indexed else '') + - ('NoScrubData|' if self.no_scrub_data else '')) - - def __str__(self): - concatenated_params = self._str - return concatenated_params.strip('|') - - @classmethod - def from_string(cls, string): - """Create a NTFSAttributes from a 
string. - - To specify permissions you can pass in a string with the - desired permissions, e.g. "ReadOnly|Hidden|System" - - :param str string: The string which dictates the permissions. - :return: A NTFSAttributes object - :rtype: ~azure.storage.fileshare.NTFSAttributes - """ - read_only = "ReadOnly" in string - hidden = "Hidden" in string - system = "System" in string - none = "None" in string - directory = "Directory" in string - archive = "Archive" in string - temporary = "Temporary" in string - offline = "Offline" in string - not_content_indexed = "NotContentIndexed" in string - no_scrub_data = "NoScrubData" in string - - parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed, - no_scrub_data) - parsed._str = string # pylint: disable = protected-access - return parsed - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. - """ - return { - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - } diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_parser.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_parser.py deleted file mode 100644 index 0b5227b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_parser.py +++ /dev/null @@ -1,48 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import datetime, timedelta - -_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time' -_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. 
file_permission should be <=8KB, else' \ - 'please use file_permission_key' - - -def _get_file_permission(file_permission, file_permission_key, default_permission): - # if file_permission and file_permission_key are both empty, then use the default_permission - # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used - empty_file_permission = not file_permission - empty_file_permission_key = not file_permission_key - file_permission_size_too_big = False if file_permission is None \ - else len(str(file_permission).encode('utf-8')) > 8 * 1024 - - if file_permission_size_too_big: - raise ValueError(_FILE_PERMISSION_TOO_LONG) - - if empty_file_permission: - if empty_file_permission_key: - return default_permission - - return None - - if empty_file_permission_key: - return file_permission - - raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS) - - -def _parse_datetime_from_str(string_datetime): - if not string_datetime: - return None - dt, _, us = string_datetime.partition(".") - dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") - us = int(us[:-2]) # microseconds - datetime_obj = dt + timedelta(microseconds=us) - return datetime_obj - - -def _datetime_to_str(datetime_obj): - return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z' diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_share_client.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_share_client.py deleted file mode 100644 index 73f7329..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_share_client.py +++ /dev/null @@ -1,676 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import ( - StorageErrorException, - SignedIdentifier, - DeleteSnapshotsOptionType, - SharePermission) -from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission -from ._directory_client import ShareDirectoryClient -from ._file_client import ShareFileClient - -if TYPE_CHECKING: - from ._models import ShareProperties, AccessPolicy - - -class ShareClient(StorageAccountHostsMixin): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. 
In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - - @classmethod - def from_share_url(cls, share_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """ - :param str share_url: The full URI to the share. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - """ - try: - if not share_url.lower().startswith('http'): - share_url = "https://" + share_url - except AttributeError: - raise ValueError("Share URL must be a string.") - parsed_url = urlparse(share_url.rstrip('/')) - if not (parsed_url.path and parsed_url.netloc): - raise ValueError("Invalid URL: {}".format(share_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" 
+ parsed_url.query - path_snapshot, _ = parse_query(parsed_url.query) - share_name = unquote(parsed_url.path.lstrip('/')) - snapshot = snapshot or unquote(path_snapshot) - - return cls(account_url, share_name, snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """Create ShareClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str snapshot: - The optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_client_from_conn_string] - :end-before: [END create_share_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Gets the share client from connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. 
- :rtype: ~azure.storage.fileshare.ShareFileClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode) - - @distributed_trace - def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 8 - :caption: Creates a file share. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 12 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. 
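A minimal lifecycle sketch for the share-level methods above (`from_connection_string`, `create_share`, `create_snapshot`, `delete_share`). The connection string, share name, metadata and quota values are placeholders, and the import again uses the public `azure.storage.fileshare` package referenced by the docstrings.

```python
# Illustrative sketch only: create a share, snapshot it, then delete the share and snapshots.
from azure.storage.fileshare import ShareClient

share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
share.create_share(metadata={"category": "test"}, quota=1)
snapshot_info = share.create_snapshot()      # property dict with the snapshot ID, etag, last modified
share.delete_share(delete_snapshots=True)    # removes the base share and the snapshot taken above
```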
- - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 12 - :caption: Deletes the share and any snapshots. - """ - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - delete_snapshots=delete_include, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 12 - :caption: Gets the share properties. - """ - timeout = kwargs.pop('timeout', None) - try: - props = self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace - def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 12 - :caption: Sets the share quota. - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.set_quota( # type: ignore - timeout=timeout, - quota=quota, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 12 - :caption: Sets the share metadata. - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - try: - return self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The approximate size of the data (in bytes) stored on the share. 
- :rtype: int - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.share.get_statistics( - timeout=timeout, - **kwargs) - return stats.share_usage_bytes # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 12 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @staticmethod - def _create_permission_for_share_options(file_permission, # type: str - **kwargs): - options = { - 'share_permission': SharePermission(permission=file_permission), - 'cls': deserialize_permission_key, - 'timeout': kwargs.pop('timeout', None), - } - options.update(kwargs) - return options - - @distributed_trace - def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return self._client.share.create_permission(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. 
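A short sketch of listing share contents and round-tripping a share-level permission, assuming placeholder values for the connection string, share name, directory name and portable SDDL string.

```python
# Illustrative sketch only: enumerate a directory and store/retrieve a permission (SDDL).
from azure.storage.fileshare import ShareClient

share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
for item in share.list_directories_and_files(directory_name="logs", name_starts_with="2019"):
    print(item["name"])                              # dict-like Directory/FileProperties

permission_key = share.create_permission_for_share("<portable SDDL string>")
sddl = share.get_permission_for_share(permission_key)
```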
- :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace - def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - directory = self.get_directory_client(directory_name) - directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_share_service_client.py deleted file mode 100644 index 5812148..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_share_service_client.py +++ /dev/null @@ -1,364 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, List, - TYPE_CHECKING -) -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.response_handlers import process_storage_error -from ._generated import AzureFileStorage -from ._generated.models import StorageErrorException, StorageServiceProperties -from ._generated.version import VERSION -from ._share_client import ShareClient -from ._models import ( - SharePropertiesPaged, - service_properties_deserialize, -) - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ( - ShareProperties, - Metrics, - CorsRule, - ) - - -class ShareServiceClient(StorageAccountHostsMixin): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. 
- For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File Share service.") - - _, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ShareServiceClient - """Create ShareServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A File Share service client. - :rtype: ~azure.storage.fileshare.ShareServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client_from_conn_string] - :end-before: [END create_share_service_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Create the share service client with connection string. 
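A minimal sketch of creating the account-level client and deriving a share-scoped client from it; the connection string and share name are placeholders.

```python
# Illustrative sketch only: account-level client, then a share client derived from it.
from azure.storage.fileshare import ShareServiceClient

service = ShareServiceClient.from_connection_string("<connection-string>")
share = service.get_share_client("myshare")
```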
- """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 8 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. - """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors - ) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ShareProperties] - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. 
- :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 12 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace - def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 8 - :caption: Create a share in the file share service. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace - def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 12 - :caption: Delete a share in the file share service. 
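A brief sketch of the lazy share enumeration described above; the connection string and name prefix are placeholders, and metadata and snapshots are pulled into the listing via the optional flags.

```python
# Illustrative sketch only: lazily page through shares, including metadata and snapshots.
from azure.storage.fileshare import ShareServiceClient

service = ShareServiceClient.from_connection_string("<connection-string>")
for props in service.list_shares(name_starts_with="my", include_metadata=True, include_snapshots=True):
    print(props.name, props.metadata)
```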
- """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. - """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, _hosts=self._hosts, - _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/authentication.py deleted file mode 100644 index a8db96d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/authentication.py +++ /dev/null @@ -1,136 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
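Stand-alone sketch of the signing primitive used here: `sign_string` above is HMAC-SHA256 over the base64-decoded account key, with the digest re-encoded as base64 and placed in a `SharedKey` Authorization header. The account name, key and string-to-sign below are fake, and the string-to-sign is only schematic; the real canonical string is assembled by `on_request` from the verb, headers and resource.

```python
# Illustrative sketch only: reproduce the HMAC-SHA256 signature computed by sign_string().
import base64
import hashlib
import hmac

account_name = "myaccount"                                  # placeholder
account_key = base64.b64encode(b"not-a-real-key").decode()  # placeholder, normally from the portal
string_to_sign = "GET\n...canonicalized headers and resource...\n"  # schematic only

digest = hmac.new(base64.b64decode(account_key),
                  string_to_sign.encode("utf-8"),
                  hashlib.sha256).digest()
signature = base64.b64encode(digest).decode("utf-8")
print("Authorization: SharedKey {}:{}".format(account_name, signature))
```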
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - def _get_headers(self, request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - def _get_verb(self, request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - def _get_canonicalized_headers(self, request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - def _get_canonicalized_resource_query(self, request): - sorted_queries = [(name, value) for name, value in request.http_request.query.items()] - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/constants.py deleted file mode 100644 index f8b033b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/constants.py +++ /dev/null @@ -1,25 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys -from .._generated.version import VERSION - -X_MS_VERSION = VERSION - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. 
- ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
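For orientation, a toy key-encryption-key object with the shape that `_validate_key_encryption_key_wrap` above checks for (`wrap_key`, `get_key_wrap_algorithm`, `get_kid`); the decrypt path additionally expects `unwrap_key(key, algorithm)`. The XOR wrapping is a placeholder, not a real key-wrap algorithm.

```python
# Illustrative sketch only: minimal interface expected of a key-encryption-key ("kek").
import os


class DemoKeyWrapper:
    def __init__(self):
        self._kid = "local:demo-kek"     # arbitrary key identifier
        self._secret = os.urandom(32)    # toy wrapping secret

    def wrap_key(self, key):
        # Placeholder wrapping; real keks use RSA-OAEP or AES key wrap.
        return bytes(a ^ b for a, b in zip(key, self._secret))

    def unwrap_key(self, wrapped_key, algorithm):
        # XOR is its own inverse, so unwrapping mirrors wrap_key; algorithm is ignored here.
        return bytes(a ^ b for a, b in zip(wrapped_key, self._secret))

    def get_key_wrap_algorithm(self):
        return "demo-xor"

    def get_kid(self):
        return self._kid
```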
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
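    # Illustrative shape of the resulting dict before JSON serialization:
    #   {'EncryptedMessageContents': '<base64 ciphertext>',
    #    'EncryptionData': {'WrappedContentKey': {...}, 'EncryptionAgent': {...},
    #                       'ContentEncryptionIV': '<base64 iv>', 'KeyWrappingMetadata': {...}}}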
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/models.py deleted file mode 100644 index c9879e9..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/models.py +++ /dev/null @@ -1,426 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = 
"IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = 
"SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. - - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. 
- :return: A ResourceTypes object - :rtype: ~azure.storage.fileshare.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - """ - def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False): - self.read = read - self.write = write - self.delete = delete - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.fileshare.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - - parsed = cls(p_read, p_write, p_delete, p_list, p_add, p_create, p_update, p_process) - parsed._str = permission # pylint: disable = protected-access - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. 
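    The string form concatenates 'b', 'q' and 'f' for the enabled services; for
    example, Services(blob=True, queue=True) serializes to 'bq', and
    Services.from_string('bq') parses it back.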
- - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.fileshare.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/policies_async.py deleted file mode 100644 index c0a4476..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/policies_async.py +++ /dev/null @@ -1,219 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. 
All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
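    Subclasses supply get_backoff_time(settings); send() retries while is_retry()
    reports a retryable response (or an AzureError is raised), sleeping on the
    pipeline transport between attempts.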
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/response_handlers.py deleted file mode 100644 index ac526e5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/response_handlers.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. 
- - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. - """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.location_mode, deserialized - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += 
"\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/shared_access_signature.py deleted file mode 100644 index 367c655..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/shared_access_signature.py +++ /dev/null @@ -1,209 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. 
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - 
get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared_access_signature.py deleted file mode 100644 index ab23dcf..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared_access_signature.py +++ /dev/null @@ -1,491 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, List, TYPE_CHECKING -) - -from ._shared import sign_string -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants -from ._shared.parser import _str - -if TYPE_CHECKING: - from datetime import datetime - from . import ( - ResourceTypes, - AccountSasPermissions, - ShareSasPermissions, - FileSasPermissions - ) - -class FileSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating file and share access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, policy_id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. 
- Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _str(directory_name) if directory_name is not None else None - resource_path += '/' + _str(file_name) if file_name is not None else None - - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param ShareSasPermissions permission: - The permissions associated with the shared access signature. 
The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, share_name) - - return sas.get_token() - - -class _FileSharedAccessHelper(_SharedAccessHelper): - - def add_resource_signature(self, account_name, account_key, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/file/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for the file service. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param ~azure.storage.fileshare.AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. 
- :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 8 - :caption: Generate a sas token. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(fileshare=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_share_sas( - account_name, # type: str - share_name, # type: str - account_key, # type: str - permission=None, # type: Optional[Union[ShareSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for a share. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. 
- :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - return sas.generate_share( - share_name=share_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_file_sas( - account_name, # type: str - share_name, # type: str - file_path, # type: List[str] - account_key, # type: str - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param file_path: - The file path represented as a list of path segments, including the file name. - :type file_path: List[str] - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. 
- If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - if len(file_path) > 1: - dir_path = '/'.join(file_path[:-1]) - else: - dir_path = None # type: ignore - return sas.generate_file( # type: ignore - share_name=share_name, - directory_name=dir_path, - file_name=file_path[-1], - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_version.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/_version.py deleted file mode 100644 index 0ff55a7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = '12.0.0' diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/__init__.py deleted file mode 100644 index 0496a0d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from ._file_client_async import ShareFileClient -from ._directory_client_async import ShareDirectoryClient -from ._share_client_async import ShareClient -from ._share_service_client_async import ShareServiceClient - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_directory_client_async.py deleted file mode 100644 index 9b503b7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_directory_client_async.py +++ /dev/null @@ -1,581 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _get_file_permission, _datetime_to_str -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import StorageErrorException -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_directory_properties -from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase -from ._file_client_async import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes - from .._generated.models import HandleItem - - -class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. 
The value can be a SAS token string or an account - shared access key. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareDirectoryClient, self).__init__( - account_url, - share_name=share_name, - directory_path=directory_path, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._loop = loop - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param str file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, - credential=self.credential, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 16 - :caption: Gets the subdirectory client. - """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - @distributed_trace_async - async def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 16 - :caption: Creates a directory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 16 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - await self._client.directory.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], Any) -> AsyncItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 16 - :caption: List directories and files. - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> AsyncItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - while try_close: - try: - response = await self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - } - - @distributed_trace_async - async def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace_async - async def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 16 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace_async - async def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 16 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace_async - async def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. 
Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 16 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace_async - async def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 16 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_download_async.py deleted file mode 100644 index 7636190..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_download_async.py +++ /dev/null @@ -1,467 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from azure.core.exceptions import HttpResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in file download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - 
self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - try: - chunk = next(self._iter_chunks) - except StopIteration: - raise StopAsyncIteration("Download complete") - self._current_content = await self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. - :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the file. - """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. 
- self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. 
- if response.properties.size == self.size: - self._download_complete = True - return response - - def chunks(self): - """Iterate over chunks in the download stream. - - :rtype: Iterable[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Text encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." 
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_file_client_async.py deleted file mode 100644 index dc205f7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_file_client_async.py +++ /dev/null @@ -1,926 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -import time -from io import BytesIO -from typing import Optional, Union, IO, List, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.async_paging import AsyncItemPaged - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _datetime_to_str, _get_file_permission -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import StorageErrorException, FileHTTPHeaders -from .._shared.policies_async import ExponentialRetry -from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.request_handlers import add_metadata_headers, get_length -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_file_properties, deserialize_file_stream -from .._file_client import ShareFileClient as ShareFileClientBase -from ._models import HandlesPaged -from ._download_async import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes - from .._generated.models import HandleItem - - -async def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs -): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = await client.create_file( - size, content_settings=content_settings, metadata=metadata, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - timeout=timeout, - **kwargs - ) - if size == 0: - return response - - responses = await upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except StorageErrorException as error: - process_storage_error(error) - - -class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. 
This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - - def __init__( # type: ignore - self, - account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareFileClient, self).__init__( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, - credential=credential, loop=loop, **kwargs - ) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._loop = loop - - @distributed_trace_async - async def create_file( # type: ignore - self, - size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 16 - :caption: Create a file. - """ - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return await self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword str encoding: - Defaults to UTF-8. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 16 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, "read"): - stream = data - elif hasattr(data, "__iter__"): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return await _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs - ) - - @distributed_trace_async - async def start_copy_from_url( - self, - source_url, # type: str - **kwargs # type: Any - ): - # type: (...) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 16 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - - try: - return await self._client.file.start_copy( - source_url, timeout=timeout, metadata=metadata, headers=headers, cls=return_response_headers, **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id["copy_id"] - except TypeError: - pass - try: - await self._client.file.abort_copy(copy_id=copy_id, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def download_file( - self, - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Iterable[bytes] - """Downloads a file to a stream with automatic chunking. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A iterable data generator (stream) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 16 - :caption: Download a file. 
- """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - downloader = StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - cls=deserialize_file_stream, - **kwargs - ) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 16 - :caption: Delete a file. - """ - timeout = kwargs.pop('timeout', None) - try: - await self._client.file.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - timeout = kwargs.pop('timeout', None) - try: - file_props = await self._client.file.get_properties( - sharesnapshot=self.snapshot, timeout=timeout, cls=deserialize_file_properties, **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = "/".join(self.file_path) - return file_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. 
- :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop("size", None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.file.set_metadata( # type: ignore - timeout=timeout, cls=return_response_headers, headers=headers, metadata=metadata, **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range( # type: ignore - self, - data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. 
- :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - try: - return await self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.file.upload_range_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_ranges( # type: ignore - self, - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid ranges of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. 
- :param int length: - Number of bytes to use over which to get ranges. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A list of valid ranges. - :rtype: List[dict[str, int]] - """ - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - content_range = None - if offset is not None: - if length is not None: - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = "bytes={0}-{1}".format(offset, end_range) - else: - content_range = "bytes={0}-".format(offset) - try: - ranges = await self._client.file.get_range_list( - sharesnapshot=self.snapshot, timeout=timeout, range=content_range, **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - return [{"start": b.start, "end": b.end} for b in ranges] - - @distributed_trace_async - async def clear_range( # type: ignore - self, - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = "bytes={0}-{1}".format(offset, end_range) - try: - return await self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - file_range_write="clear", - range=content_range, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> AsyncItemPaged - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop("results_per_page", None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - while try_close: - try: - response = await self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - } diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_models.py deleted file mode 100644 index affee8f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_models.py +++ /dev/null @@ -1,178 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator - -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error -from .._generated.models import StorageErrorException -from .._generated.models import DirectoryItem -from .._models import Handle, ShareProperties - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class SharePropertiesPaged(AsyncPageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class HandlesPaged(AsyncPageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryPropertiesPaged(AsyncPageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] - self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) - return self._response.next_marker or None, self.current_page diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_share_client_async.py deleted file mode 100644 index e6008f0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_share_client_async.py +++ /dev/null @@ -1,555 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.pipeline import AsyncPipeline -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import ( - StorageErrorException, - SignedIdentifier, - DeleteSnapshotsOptionType) -from .._deserialize import deserialize_share_properties, deserialize_permission -from .._share_client import ShareClient as ShareClientBase -from ._directory_client_async import ShareDirectoryClient -from ._file_client_async import ShareFileClient - -if TYPE_CHECKING: - from .._models import ShareProperties, AccessPolicy - - -class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. 
- - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareClient, self).__init__( - account_url, - share_name=share_name, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._loop = loop - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode, loop=self._loop) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - @distributed_trace_async - async def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. 
- :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 12 - :caption: Creates a file share. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return await self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 16 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 16 - :caption: Deletes the share and any snapshots. 
- """ - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - await self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - delete_snapshots=delete_include, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world_async.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 16 - :caption: Gets the share properties. - """ - timeout = kwargs.pop('timeout', None) - try: - props = await self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace_async - async def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 16 - :caption: Sets the share quota. - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.set_quota( # type: ignore - timeout=timeout, - quota=quota, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 16 - :caption: Sets the share metadata. 
- """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - - try: - return await self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The approximate size of the data (in bytes) stored on the share. 
- :rtype: int - """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.share.get_statistics( - timeout=timeout, - **kwargs) - return stats.share_usage_bytes # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( # type: ignore - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 16 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @distributed_trace_async - async def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return await self._client.share.create_permission(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - await directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace_async - async def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - directory = self.get_directory_client(directory_name) - await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_share_service_client_async.py deleted file mode 100644 index 68fc0c3..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/aio/_share_service_client_async.py +++ /dev/null @@ -1,318 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import process_storage_error -from .._shared.policies_async import ExponentialRetry -from .._generated.aio import AzureFileStorage -from .._generated.models import StorageErrorException, StorageServiceProperties -from .._generated.version import VERSION -from .._share_service_client import ShareServiceClient as ShareServiceClientBase -from ._share_client_async import ShareClient -from ._models import SharePropertiesPaged -from .._models import service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import ResourceTypes, AccountSasPermissions - from .._models import ( - ShareProperties, - Metrics, - CorsRule, - ) - - -class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. - For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication_async.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareServiceClient, self).__init__( - account_url, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._loop = loop - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 12 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. - """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors - ) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs # type: Any - ): # type: (...) -> AsyncItemPaged - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 16 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace_async - async def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 12 - :caption: Create a share in the file share service. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace_async - async def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 16 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. 
- :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. - """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, _hosts=self._hosts, - _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/__init__.py deleted file mode 100644 index 3266ae2..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._version import VERSION -from ._file_client import ShareFileClient -from ._directory_client import ShareDirectoryClient -from ._share_client import ShareClient -from ._share_service_client import ShareServiceClient -from ._lease import ShareLeaseClient -from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import ( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode) -from ._models import ( - ShareProperties, - DirectoryProperties, - Handle, - FileProperties, - Metrics, - RetentionPolicy, - CorsRule, - AccessPolicy, - FileSasPermissions, - ShareSasPermissions, - ContentSettings, - NTFSAttributes) -from ._generated.models import ( - HandleItem -) - -__version__ = VERSION - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageErrorCode', - 'Metrics', - 'RetentionPolicy', - 'CorsRule', - 'AccessPolicy', - 'FileSasPermissions', - 'ShareSasPermissions', - 'ShareProperties', - 'DirectoryProperties', - 'FileProperties', - 'ContentSettings', - 'Handle', - 'NTFSAttributes', - 'HandleItem', - 'generate_account_sas', - 'generate_share_sas', - 'generate_file_sas' -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_deserialize.py deleted file mode 100644 index 5475e6d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_deserialize.py +++ /dev/null @@ -1,64 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._models import ShareProperties, DirectoryProperties, FileProperties -from ._shared.response_handlers import deserialize_metadata - - -def deserialize_share_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - share_properties = ShareProperties( - metadata=metadata, - **headers - ) - return share_properties - - -def deserialize_directory_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - directory_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return directory_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_file_stream(response, obj, headers): - file_properties = deserialize_file_properties(response, obj, headers) - obj.properties = file_properties - return response.location_mode, obj - - -def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission - ''' - - return obj.permission - - -def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission key - ''' - - if response is None or headers is None: - return None - return headers.get('x-ms-file-permission-key', None) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_directory_client.py deleted file mode 100644 index f1c7c05..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_directory_client.py +++ /dev/null @@ -1,706 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
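The deserialization helpers above all follow the pipeline's response-hook convention: each takes ``(response, obj, headers)`` and returns the value the public client method should expose, and the generated operations receive such a hook through their ``cls=`` keyword (as in ``cls=deserialize_permission`` elsewhere in this diff). A minimal sketch of a hook with the same shape; the hook and client names here are illustrative, not part of the deleted module::

    def return_permission_key(response, obj, headers):
        # Same (response, obj, headers) signature as deserialize_permission_key above.
        if response is None or headers is None:
            return None
        return headers.get('x-ms-file-permission-key', None)

    # A generated operation would forward the hook via its `cls=` keyword, e.g.:
    # key = generated_client.share.create_permission(permission, cls=return_permission_key)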
-# -------------------------------------------------------------------------- - -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import StorageErrorException -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._deserialize import deserialize_directory_properties -from ._serialize import get_api_version -from ._file_client import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, DirectoryProperties, ContentSettings - from ._generated.models import HandleItem - - -class ShareDirectoryClient(StorageAccountHostsMixin): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.directory_path = directory_path - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - @classmethod - def from_directory_url(cls, directory_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> ShareDirectoryClient - """Create a ShareDirectoryClient from a directory url. - - :param str directory_url: - The full URI to the directory. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - try: - if not directory_url.lower().startswith('http'): - directory_url = "https://" + directory_url - except AttributeError: - raise ValueError("Directory URL must be a string.") - parsed_url = urlparse(directory_url.rstrip('/')) - if not parsed_url.path and not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(directory_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - path_snapshot, _ = parse_query(parsed_url.query) - - share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') - share_name = unquote(share_name) - - directory_path = path_dir - snapshot = snapshot or path_snapshot - - return cls( - account_url=account_url, share_name=share_name, directory_path=directory_path, - credential=credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. 
- """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - directory_path = "" - if self.directory_path: - directory_path = "/" + quote(self.directory_path, safe='~') - return "{}://{}/{}{}{}".format( - self.scheme, - hostname, - quote(share_name), - directory_path, - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - directory_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareDirectoryClient - """Create ShareDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str directory_path: - The directory path. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs) - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, napshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 12 - :caption: Gets the subdirectory client. 
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode, **kwargs) - - @distributed_trace - def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 12 - :caption: Creates a directory. - """ - timeout = kwargs.pop('timeout', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 12 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - self._client.directory.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], **Any) -> ItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 12 - :caption: List directories and files. 
- """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> ItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace - def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace - def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. 
- :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_subdirectory( - self, directory_name, # type: str - **kwargs): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 12 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace - def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 12 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace - def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. 
- :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 12 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace - def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 12 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_download.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_download.py deleted file mode 100644 index 8a86027..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_download.py +++ /dev/null @@ -1,522 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - 
chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], download_range[1], check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _ChunkIterator(object): - """Iterator for chunks in a file download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - chunk = next(self._iter_chunks) - self._current_content = self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - next = __next__ # Python 2 compatibility. - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. - :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the file. 
- """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size == self.size: - self._download_complete = True - return response - - def chunks(self): - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Text encoding to decode the downloaded bytes. Default is UTF-8. 
- :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_file_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_file_client.py deleted file mode 100644 index 3110a19..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_file_client.py +++ /dev/null @@ -1,1328 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines -import functools -import time -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Optional, Union, IO, List, Dict, Any, Iterable, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import StorageErrorException, FileHTTPHeaders -from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, get_length -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._lease import ShareLeaseClient -from ._serialize import get_source_conditions, get_access_conditions, get_smb_properties, get_api_version -from ._deserialize import deserialize_file_properties, deserialize_file_stream -from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import -from ._download import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, ContentSettings, FileProperties, Handle - from ._generated.models import HandleItem - - -def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = client.create_file( - size, - content_settings=content_settings, - metadata=metadata, - timeout=timeout, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - **kwargs - ) - if size == 0: - return response - - responses = upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except StorageErrorException as error: - process_storage_error(error) - - -class ShareFileClient(StorageAccountHostsMixin): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. - :param str snapshot: - An optional file snapshot on which to operate. 
This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not (share_name and file_path): - raise ValueError("Please specify a share name and file name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.file_path = file_path.split('/') - self.file_name = self.file_path[-1] - self.directory_path = "/".join(self.file_path[:-1]) - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - @classmethod - def from_file_url( - cls, file_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """A client to interact with a specific file, although that file may not yet exist. - - :param str file_url: The full URI to the file. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A File client. 
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-        """
-        try:
-            if not file_url.lower().startswith('http'):
-                file_url = "https://" + file_url
-        except AttributeError:
-            raise ValueError("File URL must be a string.")
-        parsed_url = urlparse(file_url.rstrip('/'))
-
-        if not (parsed_url.netloc and parsed_url.path):
-            raise ValueError("Invalid URL: {}".format(file_url))
-        account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query
-
-        path_share, _, path_file = parsed_url.path.lstrip('/').partition('/')
-        path_snapshot, _ = parse_query(parsed_url.query)
-        snapshot = snapshot or path_snapshot
-        share_name = unquote(path_share)
-        file_path = '/'.join([unquote(p) for p in path_file.split('/')])
-        return cls(account_url, share_name, file_path, snapshot, credential, **kwargs)
-
-    def _format_url(self, hostname):
-        """Format the endpoint URL according to the current location
-        mode hostname.
-        """
-        share_name = self.share_name
-        if isinstance(share_name, six.text_type):
-            share_name = share_name.encode('UTF-8')
-        return "{}://{}/{}/{}{}".format(
-            self.scheme,
-            hostname,
-            quote(share_name),
-            "/".join([quote(p, safe='~') for p in self.file_path]),
-            self._query_str)
-
-    @classmethod
-    def from_connection_string(
-            cls, conn_str,  # type: str
-            share_name,  # type: str
-            file_path,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> ShareFileClient
-        """Create ShareFileClient from a Connection String.
-
-        :param str conn_str:
-            A connection string to an Azure Storage account.
-        :param share_name: The name of the share.
-        :type share_name: str
-        :param str file_path:
-            The file path.
-        :param str snapshot:
-            An optional file snapshot on which to operate. This can be the snapshot ID string
-            or the response returned from :func:`ShareClient.create_snapshot`.
-        :param credential:
-            The credential with which to authenticate. This is optional if the
-            account URL already has a SAS token. The value can be a SAS token string or an account
-            shared access key.
-        :returns: A File client.
-        :rtype: ~azure.storage.fileshare.ShareFileClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/file_samples_hello_world.py
-                :start-after: [START create_file_client]
-                :end-before: [END create_file_client]
-                :language: python
-                :dedent: 12
-                :caption: Creates the file client with connection string.
-        """
-        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file')
-        if 'secondary_hostname' not in kwargs:
-            kwargs['secondary_hostname'] = secondary
-        return cls(
-            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs)
-
-    @distributed_trace
-    def acquire_lease(self, lease_id=None, **kwargs):
-        # type: (Optional[str], **Any) -> ShareLeaseClient
-        """Requests a new lease.
-
-        If the file does not have an active lease, the File
-        Service creates a lease on the file and returns a new lease.
-
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The File Service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A ShareLeaseClient object.
-        :rtype: ~azure.storage.fileshare.ShareLeaseClient
-
-        .. admonition:: Example:
-
-            ..
literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_file( # type: ignore - self, size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 12 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. 
- :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 12 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs) - - @distributed_trace - def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. 
- Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 12 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - try: - self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def download_file( - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Iterable[bytes] - """Downloads a file to a stream with automatic chunking. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A iterable data generator (stream) - - .. 
admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 12 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - return StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs) - - @distributed_trace - def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 12 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = '/'.join(self.file_path) - return file_props # type: ignore - - @distributed_trace - def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop('size', None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_file_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.file.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - metadata=metadata, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def upload_range( # type: ignore - self, data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. 
Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @staticmethod - def _upload_range_from_url_options(source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - - if offset is None: - raise ValueError("offset must be provided.") - if length is None: - raise ValueError("length must be provided.") - if source_offset is None: - raise ValueError("source_offset must be provided.") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) - - source_mod_conditions = get_source_conditions(kwargs) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'copy_source': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_modified_access_conditions': source_mod_conditions, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.file.upload_range_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid ranges of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A list of valid ranges. 
- :rtype: List[dict[str, int]] - """ - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - content_range = None - if offset is not None: - if length is not None: - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - else: - content_range = 'bytes={0}-'.format(offset) - try: - ranges = self._client.file.get_range_list( - range=content_range, - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return [{'start': b.start, 'end': b.end} for b in ranges] - - @distributed_trace - def clear_range( # type: ignore - self, offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - try: - return self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> ItemPaged[Handle] - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/__init__.py deleted file mode 100644 index 22b5762..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] - -from .version import VERSION - -__version__ = VERSION - diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_azure_file_storage.py deleted file mode 100644 index e3dd92c..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_azure_file_storage.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import PipelineClient -from msrest import Serializer, Deserializer - -from ._configuration import AzureFileStorageConfiguration -from azure.core.exceptions import map_error -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from . 
import models - - -class AzureFileStorage(object): - """AzureFileStorage - - - :ivar service: Service operations - :vartype service: azure.storage.fileshare.operations.ServiceOperations - :ivar share: Share operations - :vartype share: azure.storage.fileshare.operations.ShareOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.fileshare.operations.DirectoryOperations - :ivar file: File operations - :vartype file: azure.storage.fileshare.operations.FileOperations - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - """ - - def __init__(self, version, url, **kwargs): - - base_url = '{url}' - self._config = AzureFileStorageConfiguration(version, url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-12-12' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - self._client.close() - def __enter__(self): - self._client.__enter__() - return self - def __exit__(self, *exc_details): - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_configuration.py deleted file mode 100644 index d638b1e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/_configuration.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from .version import VERSION - - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage - Note that all parameters used to create this instance are saved as instance - attributes. - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - :ivar file_range_write_from_url: Only update is supported: - Update: - Writes the bytes downloaded from the source url into the specified range. 
- :type file_range_write_from_url: str - """ - - def __init__(self, version, url, **kwargs): - - if version is None: - raise ValueError("Parameter 'version' must not be None.") - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) - self.generate_client_request_id = True - - self.version = version - self.url = url - self.file_range_write_from_url = "update" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/__init__.py deleted file mode 100644 index 942d3c5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._azure_file_storage_async import AzureFileStorage -__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_azure_file_storage_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_azure_file_storage_async.py deleted file mode 100644 index 39cf463..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_azure_file_storage_async.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core import AsyncPipelineClient -from msrest import Serializer, Deserializer - -from ._configuration_async import AzureFileStorageConfiguration -from azure.core.exceptions import map_error -from .operations_async import ServiceOperations -from .operations_async import ShareOperations -from .operations_async import DirectoryOperations -from .operations_async import FileOperations -from .. 
import models - - -class AzureFileStorage(object): - """AzureFileStorage - - - :ivar service: Service operations - :vartype service: azure.storage.fileshare.aio.operations_async.ServiceOperations - :ivar share: Share operations - :vartype share: azure.storage.fileshare.aio.operations_async.ShareOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.fileshare.aio.operations_async.DirectoryOperations - :ivar file: File operations - :vartype file: azure.storage.fileshare.aio.operations_async.FileOperations - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - """ - - def __init__( - self, version, url, **kwargs): - - base_url = '{url}' - self._config = AzureFileStorageConfiguration(version, url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-12-12' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self): - await self._client.close() - async def __aenter__(self): - await self._client.__aenter__() - return self - async def __aexit__(self, *exc_details): - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_configuration_async.py deleted file mode 100644 index 75c206e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/_configuration_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from ..version import VERSION - - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage - Note that all parameters used to create this instance are saved as instance - attributes. - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - :ivar file_range_write_from_url: Only update is supported: - Update: - Writes the bytes downloaded from the source url into the specified range. 
- :type file_range_write_from_url: str - """ - - def __init__(self, version, url, **kwargs): - - if version is None: - raise ValueError("Parameter 'version' must not be None.") - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) - self.generate_client_request_id = True - self.accept_language = None - - self.version = version - self.url = url - self.file_range_write_from_url = "update" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/__init__.py deleted file mode 100644 index 601c709..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations_async import ServiceOperations -from ._share_operations_async import ShareOperations -from ._directory_operations_async import DirectoryOperations -from ._file_operations_async import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py deleted file mode 100644 index 30aea57..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_directory_operations_async.py +++ /dev/null @@ -1,672 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class DirectoryOperations: - """DirectoryOperations async operations. 
- - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "directory". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "directory" - - async def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): - """Creates a new directory under the specified share or parent directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata 
= {'url': '/{shareName}/{directory}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): - """Returns all system properties for the specified directory, and can also - be used to check the existence of a directory. The data returned does - not include the files in the directory or any subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return 
cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}'} - - async def delete(self, timeout=None, *, cls=None, **kwargs): - """Removes the specified empty directory. Note that the directory must be - empty before it can be deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}'} - - async def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): - """Sets properties on the directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. 
- :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{shareName}/{directory}'} - - async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}'} - - async def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, *, cls=None, **kwargs): - """Returns a list of files or directories under the specified share or - directory. It lists the contents only for a single level of the - directory hierarchy. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListFilesAndDirectoriesSegmentResponse or the result of - cls(response) - :rtype: - ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return 
deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} - - async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs): - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', 
response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} - - async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs): - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterix (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, 
self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_file_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_file_operations_async.py deleted file mode 100644 index b8957df..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_file_operations_async.py +++ /dev/null @@ -1,1666 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class FileOperations: - """FileOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file". - :ivar x_ms_copy_action: . Constant value: "abort". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_type = "file" - self.x_ms_copy_action = "abort" - - async def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Creates a new file or replaces a file. Note it only initializes the - file with no content. - - :param file_content_length: Specifies the maximum size for the file, - up to 1 TB. - :type file_content_length: long - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. 
- Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = 
self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Reads or downloads a 
file from the system, including its metadata and - properties. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and - specified together with the Range header, the service returns the MD5 - hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Returns all user-defined metadata, standard HTTP properties, and system - properties for the file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': 
self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def delete(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Sets HTTP headers on the file. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If - the specified byte value is less than the current size of the file, - then all ranges above the specified byte value are cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = 
self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def release_lease(self, lease_id, timeout=None, request_id=None, *, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the - start and end of the range must be specified. For an update operation, - the range can be up to 4 MB in size. For a clear operation, the range - can be up to the value of the file's full size. The File service - accepts only a single byte range for the Range and 'x-ms-range' - headers, and the byte range must be specified in the following format: - bytes=startByte-endByte. 
- :type range: str - :param file_range_write: Specify one of the following options: - - Update: Writes the bytes specified by the request body into the - specified range. The Range and Content-Length headers must match to - perform the update. - Clear: Clears the specified range and releases - the space used in storage for that range. To clear a range, set the - Content-Length header to zero, and set the Range header to a value - that indicates the range to clear, up to maximum file size. Possible - values include: 'update', 'clear' - :type file_range_write: str or - ~azure.storage.fileshare.models.FileRangeWriteType - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param optionalbody: Initial data. - :type optionalbody: Generator - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param content_md5: An MD5 hash of the content. This hash is used to - verify the integrity of the data during transport. When the - Content-MD5 header is specified, the File service compares the hash of - the content that has arrived with the header value that was sent. If - the two hashes do not match, the operation will fail with error code - 400 (Bad Request). - :type content_md5: bytearray - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "range" - - # Construct URL - url = self.upload_range.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Upload a range of bytes to a file where the contents are read from a - URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the - range of bytes that must be read from the copy source. 
- :type source_content_crc64: bytearray - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - source_if_none_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "range" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') - if source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def get_range_list(self, sharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, - inclusively. - :type range: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.Range] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "rangelist" - - # Construct URL - url = self.get_range_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = 
self._deserialize('[Range]', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param copy_file_smb_info: Additional parameters for the operation - :type copy_file_smb_info: - ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_permission_copy_mode = None - if copy_file_smb_info is not None: - file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - ignore_read_only = None - if copy_file_smb_info is not None: - ignore_read_only = copy_file_smb_info.ignore_read_only - file_attributes = None - if copy_file_smb_info is not None: - file_attributes = copy_file_smb_info.file_attributes - file_creation_time = None - if copy_file_smb_info is not None: - file_creation_time = copy_file_smb_info.file_creation_time - file_last_write_time = None - if copy_file_smb_info is not None: - file_last_write_time = copy_file_smb_info.file_last_write_time - set_archive_attribute = None - if copy_file_smb_info is not None: - set_archive_attribute = copy_file_smb_info.set_archive_attribute - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType') - if ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') - if file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - if file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - if file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", 
set_archive_attribute, 'bool') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Aborts a pending Copy File operation, and leaves a destination file - with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, *, cls=None, **kwargs): - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, *, cls=None, **kwargs): - """Closes all handles open for the given file. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterisk (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client.
- :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py deleted file mode 100644 index c4e40f1..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_service_operations_async.py +++ /dev/null @@ -1,253 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "service". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "service" - - async def set_properties(self, storage_service_properties, timeout=None, *, cls=None, **kwargs): - """Sets properties for a storage account's File service endpoint, - including properties for Storage Analytics metrics and CORS - (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - async def get_properties(self, timeout=None, *, cls=None, **kwargs): - 
"""Gets the properties of a storage account's File service, including - properties for Storage Analytics metrics and CORS (Cross-Origin - Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, *, cls=None, **kwargs): - """The List Shares Segment operation returns a list of the shares and - share snapshots under the specified account. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. 
- :type include: list[str or - ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListSharesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_shares_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListSharesResponse', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_shares_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_share_operations_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_share_operations_async.py deleted file mode 100644 index 3005625..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/aio/operations_async/_share_operations_async.py +++ /dev/null @@ -1,825 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ShareOperations: - """ShareOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "share". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "share" - - async def create(self, timeout=None, metadata=None, quota=None, *, cls=None, **kwargs): - """Creates a new share under the specified account. If the share with the - same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': 
self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): - """Returns all user-defined metadata and system properties for the - specified share or share snapshot. The data returned does not include - the share's list of files. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), - 'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')), - 'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')), - 'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')), - 'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}'} - - async def 
delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, *, cls=None, **kwargs): - """Operation marks the specified share or share snapshot for deletion. The - share or share snapshot and any files contained within it are later - deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the - base share and all of its snapshots. Possible values include: - 'include' - :type delete_snapshots: str or - ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}'} - - async def create_snapshot(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{shareName}'} - - async def create_permission(self, share_permission, timeout=None, *, cls=None, **kwargs): - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the - share level. - :type share_permission: - ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.create_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_permission.metadata = {'url': '/{shareName}'} - - async def get_permission(self, file_permission_key, timeout=None, *, cls=None, **kwargs): - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the - directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: SharePermission or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.get_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SharePermission', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} - - async def set_quota(self, timeout=None, quota=None, *, cls=None, **kwargs): - """Sets quota for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. 
- :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_quota.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_quota.metadata = {'url': '/{shareName}'} - - async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}'} - - async def get_access_policy(self, timeout=None, *, cls=None, **kwargs): - """Returns information about stored access policies specified on the - share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} - - async def set_access_policy(self, share_acl=None, timeout=None, *, cls=None, **kwargs): - """Sets a stored access policy for use with shared access signatures. - - :param share_acl: The ACL for the share. - :type share_acl: - list[~azure.storage.fileshare.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{shareName}'} - - async def get_statistics(self, timeout=None, *, cls=None, **kwargs): - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ShareStats or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ShareStats', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} - - async def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, *, cls=None, **kwargs): - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param deleted_share_name: Specifies the name of the - previously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the - previously-deleted share.
- :type deleted_share_version: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.restore.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - restore.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/__init__.py deleted file mode 100644 index 44ec5d1..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/__init__.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import CopyFileSmbInfo - from ._models_py3 import CorsRule - from ._models_py3 import DirectoryItem - from ._models_py3 import FileHTTPHeaders - from ._models_py3 import FileItem - from ._models_py3 import FileProperty - from ._models_py3 import FilesAndDirectoriesListSegment - from ._models_py3 import HandleItem - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListFilesAndDirectoriesSegmentResponse - from ._models_py3 import ListHandlesResponse - from ._models_py3 import ListSharesResponse - from ._models_py3 import Metrics - from ._models_py3 import Range - from ._models_py3 import RetentionPolicy - from ._models_py3 import ShareItem - from ._models_py3 import SharePermission - from ._models_py3 import ShareProperties - from ._models_py3 import ShareStats - from ._models_py3 import SignedIdentifier - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError, StorageErrorException - from ._models_py3 import StorageServiceProperties -except (SyntaxError, ImportError): - from ._models import AccessPolicy - from ._models import CopyFileSmbInfo - from ._models import CorsRule - from ._models import DirectoryItem - from ._models import FileHTTPHeaders - from ._models import FileItem - from ._models import FileProperty - from ._models import FilesAndDirectoriesListSegment - from ._models import HandleItem - from ._models import LeaseAccessConditions - from ._models import ListFilesAndDirectoriesSegmentResponse - from ._models import ListHandlesResponse - from ._models import ListSharesResponse - from ._models import Metrics - from ._models import Range - from ._models import RetentionPolicy - from ._models import ShareItem - from ._models import SharePermission - from ._models import ShareProperties - from ._models import ShareStats - from ._models import SignedIdentifier - from ._models import SourceModifiedAccessConditions - from ._models import StorageError, StorageErrorException - from ._models import StorageServiceProperties -from ._azure_file_storage_enums import ( - CopyStatusType, - DeleteSnapshotsOptionType, - FileRangeWriteType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListSharesIncludeType, - PermissionCopyModeType, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'CopyFileSmbInfo', - 'CorsRule', - 'DirectoryItem', - 'FileHTTPHeaders', - 'FileItem', - 'FileProperty', - 'FilesAndDirectoriesListSegment', - 'HandleItem', - 'LeaseAccessConditions', - 'ListFilesAndDirectoriesSegmentResponse', - 'ListHandlesResponse', - 'ListSharesResponse', - 'Metrics', - 'Range', - 'RetentionPolicy', - 'ShareItem', - 'SharePermission', - 'ShareProperties', - 'ShareStats', - 'SignedIdentifier', - 'SourceModifiedAccessConditions', - 'StorageError', 'StorageErrorException', - 'StorageServiceProperties', - 'StorageErrorCode', - 'PermissionCopyModeType', - 'DeleteSnapshotsOptionType', - 'ListSharesIncludeType', - 'CopyStatusType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'FileRangeWriteType', -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_azure_file_storage_enums.py deleted file mode 100644 index 66f39fb..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_azure_file_storage_enums.py +++ 
/dev/null @@ -1,135 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum - - -class StorageErrorCode(str, Enum): - - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - 
authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch" - authorization_protocol_mismatch = "AuthorizationProtocolMismatch" - authorization_permission_mismatch = "AuthorizationPermissionMismatch" - authorization_service_mismatch = "AuthorizationServiceMismatch" - authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - - -class PermissionCopyModeType(str, Enum): - - source = "source" - override = "override" - - -class DeleteSnapshotsOptionType(str, Enum): - - include = "include" - - -class ListSharesIncludeType(str, Enum): - - snapshots = "snapshots" - metadata = "metadata" - deleted = "deleted" - - -class CopyStatusType(str, Enum): - - pending = "pending" - success = "success" - aborted = "aborted" - failed = "failed" - - -class LeaseDurationType(str, Enum): - - infinite = "infinite" - fixed = "fixed" - - -class LeaseStateType(str, Enum): - - available = "available" - leased = "leased" - expired = "expired" - breaking = "breaking" - broken = "broken" - - -class LeaseStatusType(str, Enum): - - locked = "locked" - unlocked = "unlocked" - - -class FileRangeWriteType(str, Enum): - - update = "update" - clear = "clear" diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models.py deleted file mode 100644 index f5cc1fa..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models.py +++ /dev/null @@ -1,896 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class CopyFileSmbInfo(Model): - """Additional parameters for start_copy operation. - - :param file_permission_copy_mode: Specifies the option to copy file - security descriptor from source file or to set it using the value which is - defined by the header value of x-ms-file-permission or - x-ms-file-permission-key. Possible values include: 'source', 'override' - :type file_permission_copy_mode: str or - ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file - if it already exists and has read-only attribute set. 
- :type ignore_read_only: bool - :param file_attributes: Specifies either the option to copy file - attributes from a source file(source) to a target file or a list of - attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file - creation time from a source file(source) to a target file or a time value - in ISO 8601 format to set as creation time on a target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last - write time from a source file(source) to a target file or a time value in - ISO 8601 format to set as last write time on a target file. - :type file_last_write_time: str - :param set_archive_attribute: Specifies the option to set archive - attribute on a target file. True means archive attribute will be set on a - target file despite attribute overrides or a source file state. - :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}}, - 'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}}, - 'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}}, - 'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}}, - 'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}}, - 'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None) - self.ignore_read_only = kwargs.get('ignore_read_only', None) - self.file_attributes = kwargs.get('file_attributes', None) - self.file_creation_time = kwargs.get('file_creation_time', None) - self.file_last_write_time = kwargs.get('file_last_write_time', None) - self.set_archive_attribute = kwargs.get('set_archive_attribute', None) - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer. 
- :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs.get('allowed_origins', None) - self.allowed_methods = kwargs.get('allowed_methods', None) - self.allowed_headers = kwargs.get('allowed_headers', None) - self.exposed_headers = kwargs.get('exposed_headers', None) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) - - -class DirectoryItem(Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__(self, **kwargs): - super(DirectoryItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - - -class FileHTTPHeaders(Model): - """Additional parameters for a set of operations, such as: File_create, - File_set_http_headers. - - :param file_content_type: Sets the MIME content type of the file. The - default type is 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been - applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this - resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service - stores this value but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition - header. 
- :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, - 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, - 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, - 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, - 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, - 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = kwargs.get('file_content_type', None) - self.file_content_encoding = kwargs.get('file_content_encoding', None) - self.file_content_language = kwargs.get('file_content_language', None) - self.file_cache_control = kwargs.get('file_cache_control', None) - self.file_content_md5 = kwargs.get('file_content_md5', None) - self.file_content_disposition = kwargs.get('file_content_disposition', None) - - -class FileItem(Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.FileProperty - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, - } - _xml_map = { - 'name': 'File' - } - - def __init__(self, **kwargs): - super(FileItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.properties = kwargs.get('properties', None) - - -class FileProperty(Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value - may not be up-to-date since an SMB client may have modified the file - locally. The value of Content-Length may not reflect that fact until the - handle is closed or the op-lock is broken. To retrieve current property - values, call Get File Properties. - :type content_length: long - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(FileProperty, self).__init__(**kwargs) - self.content_length = kwargs.get('content_length', None) - - -class FilesAndDirectoriesListSegment(Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. 
- :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__(self, **kwargs): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = kwargs.get('directory_items', None) - self.file_items = kwargs.get('file_items', None) - - -class HandleItem(Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID - :type handle_id: str - :param path: Required. File or directory name including full path starting - from share root - :type path: str - :param file_id: Required. FileId uniquely identifies the file or - directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the - object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file - handle was opened - :type session_id: str - :param client_ip: Required. Client IP that opened the handle - :type client_ip: str - :param open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :type open_time: datetime - :param last_reconnect_time: Time handle was last connected to (UTC) - :type last_reconnect_time: datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, - 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, - 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, - 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, - 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, - 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__(self, **kwargs): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = kwargs.get('handle_id', None) - self.path = kwargs.get('path', None) - self.file_id = kwargs.get('file_id', None) - self.parent_id = kwargs.get('parent_id', None) - self.session_id = kwargs.get('session_id', None) - self.client_ip = kwargs.get('client_ip', None) - self.open_time = kwargs.get('open_time', None) - self.last_reconnect_time = kwargs.get('last_reconnect_time', None) - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. 
- :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListFilesAndDirectoriesSegmentResponse(Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: - ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.share_name = kwargs.get('share_name', None) - self.share_snapshot = kwargs.get('share_snapshot', None) - self.directory_path = kwargs.get('directory_path', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListHandlesResponse(Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = kwargs.get('handle_list', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListSharesResponse(Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.share_items = kwargs.get('share_items', None) - self.next_marker = kwargs.get('next_marker', None) - - -class Metrics(Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. 
- :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs.get('enabled', None) - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class Range(Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__(self, **kwargs): - super(Range, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class RetentionPolicy(Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the File service. If false, metrics data is retained, and the user is - responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be - retained. All data older than this value will be deleted. Metrics data is - deleted on a best-effort basis after the retention period expires. - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.days = kwargs.get('days', None) - - -class ShareItem(Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. 
- :type properties: ~azure.storage.fileshare.models.ShareProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__(self, **kwargs): - super(ShareItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.snapshot = kwargs.get('snapshot', None) - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - - -class SharePermission(Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor - Definition Language (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SharePermission, self).__init__(**kwargs) - self.permission = kwargs.get('permission', None) - - -class ShareProperties(Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param quota: Required. 
- :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_mbps: - :type provisioned_ingress_mbps: int - :param provisioned_egress_mbps: - :type provisioned_egress_mbps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: datetime - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}}, - 'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}}, - 'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareProperties, self).__init__(**kwargs) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.quota = kwargs.get('quota', None) - self.provisioned_iops = kwargs.get('provisioned_iops', None) - self.provisioned_ingress_mbps = kwargs.get('provisioned_ingress_mbps', None) - self.provisioned_egress_mbps = kwargs.get('provisioned_egress_mbps', None) - self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - - -class ShareStats(Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data - stored in bytes. Note that this value may not include all recently created - or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = kwargs.get('share_usage_bytes', None) - - -class SignedIdentifier(Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. 
- :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.access_policy = kwargs.get('access_policy', None) - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for upload_range_from_url operation. - - :param source_if_match_crc64: Specify the crc64 value to operate only on - range with a matching crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only - on range without a matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}}, - 'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None) - self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None) - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in - hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in - minute aggregates for files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. 
- :type cors: list[~azure.storage.fileshare.models.CorsRule] - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models_py3.py deleted file mode 100644 index 0be5dca..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/models/_models_py3.py +++ /dev/null @@ -1,896 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None: - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class CopyFileSmbInfo(Model): - """Additional parameters for start_copy operation. - - :param file_permission_copy_mode: Specifies the option to copy file - security descriptor from source file or to set it using the value which is - defined by the header value of x-ms-file-permission or - x-ms-file-permission-key. Possible values include: 'source', 'override' - :type file_permission_copy_mode: str or - ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file - if it already exists and has read-only attribute set. - :type ignore_read_only: bool - :param file_attributes: Specifies either the option to copy file - attributes from a source file(source) to a target file or a list of - attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file - creation time from a source file(source) to a target file or a time value - in ISO 8601 format to set as creation time on a target file. 
- :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last - write time from a source file(source) to a target file or a time value in - ISO 8601 format to set as last write time on a target file. - :type file_last_write_time: str - :param set_archive_attribute: Specifies the option to set archive - attribute on a target file. True means archive attribute will be set on a - target file despite attribute overrides or a source file state. - :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}}, - 'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}}, - 'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}}, - 'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}}, - 'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}}, - 'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}}, - } - _xml_map = { - } - - def __init__(self, *, file_permission_copy_mode=None, ignore_read_only: bool=None, file_attributes: str=None, file_creation_time: str=None, file_last_write_time: str=None, set_archive_attribute: bool=None, **kwargs) -> None: - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_permission_copy_mode = file_permission_copy_mode - self.ignore_read_only = ignore_read_only - self.file_attributes = file_attributes - self.file_creation_time = file_creation_time - self.file_last_write_time = file_last_write_time - self.set_archive_attribute = set_archive_attribute - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. 
- :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class DirectoryItem(Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__(self, *, name: str, **kwargs) -> None: - super(DirectoryItem, self).__init__(**kwargs) - self.name = name - - -class FileHTTPHeaders(Model): - """Additional parameters for a set of operations, such as: File_create, - File_set_http_headers. - - :param file_content_type: Sets the MIME content type of the file. The - default type is 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been - applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this - resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service - stores this value but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition - header. 
- :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, - 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, - 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, - 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, - 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, - 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, file_content_type: str=None, file_content_encoding: str=None, file_content_language: str=None, file_cache_control: str=None, file_content_md5: bytearray=None, file_content_disposition: str=None, **kwargs) -> None: - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = file_content_type - self.file_content_encoding = file_content_encoding - self.file_content_language = file_content_language - self.file_cache_control = file_cache_control - self.file_content_md5 = file_content_md5 - self.file_content_disposition = file_content_disposition - - -class FileItem(Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.FileProperty - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, - } - _xml_map = { - 'name': 'File' - } - - def __init__(self, *, name: str, properties, **kwargs) -> None: - super(FileItem, self).__init__(**kwargs) - self.name = name - self.properties = properties - - -class FileProperty(Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value - may not be up-to-date since an SMB client may have modified the file - locally. The value of Content-Length may not reflect that fact until the - handle is closed or the op-lock is broken. To retrieve current property - values, call Get File Properties. - :type content_length: long - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - } - _xml_map = { - } - - def __init__(self, *, content_length: int, **kwargs) -> None: - super(FileProperty, self).__init__(**kwargs) - self.content_length = content_length - - -class FilesAndDirectoriesListSegment(Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. 
- :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__(self, *, directory_items, file_items, **kwargs) -> None: - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = directory_items - self.file_items = file_items - - -class HandleItem(Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID - :type handle_id: str - :param path: Required. File or directory name including full path starting - from share root - :type path: str - :param file_id: Required. FileId uniquely identifies the file or - directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the - object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file - handle was opened - :type session_id: str - :param client_ip: Required. Client IP that opened the handle - :type client_ip: str - :param open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :type open_time: datetime - :param last_reconnect_time: Time handle was last connected to (UTC) - :type last_reconnect_time: datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, - 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, - 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, - 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, - 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, - 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__(self, *, handle_id: str, path: str, file_id: str, session_id: str, client_ip: str, open_time, parent_id: str=None, last_reconnect_time=None, **kwargs) -> None: - super(HandleItem, self).__init__(**kwargs) - self.handle_id = handle_id - self.path = path - self.file_id = file_id - self.parent_id = parent_id - self.session_id = session_id - self.client_ip = client_ip - self.open_time = open_time - self.last_reconnect_time = last_reconnect_time - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. 
- :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, *, lease_id: str=None, **kwargs) -> None: - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListFilesAndDirectoriesSegmentResponse(Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: - ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, share_name: str, directory_path: str, prefix: str, segment, next_marker: str, share_snapshot: str=None, marker: str=None, max_results: int=None, **kwargs) -> None: - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.share_name = share_name - self.share_snapshot = share_snapshot - self.directory_path = directory_path - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListHandlesResponse(Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, next_marker: str, handle_list=None, **kwargs) -> None: - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = handle_list - self.next_marker = next_marker - - -class ListSharesResponse(Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, next_marker: str, prefix: str=None, marker: str=None, max_results: int=None, share_items=None, **kwargs) -> None: - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.share_items = share_items - self.next_marker = next_marker - - -class Metrics(Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. 
- :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, version: str, enabled: bool, include_apis: bool=None, retention_policy=None, **kwargs) -> None: - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class Range(Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(Range, self).__init__(**kwargs) - self.start = start - self.end = end - - -class RetentionPolicy(Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the File service. If false, metrics data is retained, and the user is - responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be - retained. All data older than this value will be deleted. Metrics data is - deleted on a best-effort basis after the retention period expires. - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class ShareItem(Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. 
- :type properties: ~azure.storage.fileshare.models.ShareProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__(self, *, name: str, properties, snapshot: str=None, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None: - super(ShareItem, self).__init__(**kwargs) - self.name = name - self.snapshot = snapshot - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class SharePermission(Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor - Definition Language (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, - } - _xml_map = { - } - - def __init__(self, *, permission: str, **kwargs) -> None: - super(SharePermission, self).__init__(**kwargs) - self.permission = permission - - -class ShareProperties(Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param quota: Required. 
- :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_mbps: - :type provisioned_ingress_mbps: int - :param provisioned_egress_mbps: - :type provisioned_egress_mbps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: datetime - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}}, - 'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}}, - 'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - } - _xml_map = { - } - - def __init__(self, *, last_modified, etag: str, quota: int, provisioned_iops: int=None, provisioned_ingress_mbps: int=None, provisioned_egress_mbps: int=None, next_allowed_quota_downgrade_time=None, deleted_time=None, remaining_retention_days: int=None, **kwargs) -> None: - super(ShareProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.quota = quota - self.provisioned_iops = provisioned_iops - self.provisioned_ingress_mbps = provisioned_ingress_mbps - self.provisioned_egress_mbps = provisioned_egress_mbps - self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - - -class ShareStats(Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data - stored in bytes. Note that this value may not include all recently created - or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, - } - _xml_map = { - } - - def __init__(self, *, share_usage_bytes: int, **kwargs) -> None: - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = share_usage_bytes - - -class SignedIdentifier(Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. 
- :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for upload_range_from_url operation. - - :param source_if_match_crc64: Specify the crc64 value to operate only on - range with a matching crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only - on range without a matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}}, - 'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}}, - } - _xml_map = { - } - - def __init__(self, *, source_if_match_crc64: bytearray=None, source_if_none_match_crc64: bytearray=None, **kwargs) -> None: - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = source_if_match_crc64 - self.source_if_none_match_crc64 = source_if_none_match_crc64 - - -class StorageError(Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { - } - - def __init__(self, *, message: str=None, **kwargs) -> None: - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageErrorException(HttpResponseError): - """Server responded with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in - hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in - minute aggregates for files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. 
- :type cors: list[~azure.storage.fileshare.models.CorsRule] - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, *, hour_metrics=None, minute_metrics=None, cors=None, **kwargs) -> None: - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_directory_operations.py deleted file mode 100644 index c1afd8e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,672 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "directory". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "directory" - - def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs): - """Creates a new directory under the specified share or parent directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. 
Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': 
self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}/{directory}'} - - def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs): - """Returns all system properties for the specified directory, and can also - be used to check the existence of a directory. The data returned does - not include the files in the directory or any subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', 
response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}'} - - def delete(self, timeout=None, cls=None, **kwargs): - """Removes the specified empty directory. Note that the directory must be - empty before it can be deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}'} - - def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs): - """Sets properties on the directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 
'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{shareName}/{directory}'} - - def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs): - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}'} - - def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, cls=None, **kwargs): - """Returns a list of files or directories under the specified share or - directory. 
It lists the contents only for a single level of the - directory hierarchy. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListFilesAndDirectoriesSegmentResponse or the result of - cls(response) - :rtype: - ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', 
response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} - - def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, cls=None, **kwargs): - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} - - def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, cls=None, **kwargs): - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterisk (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. 
- :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_file_operations.py deleted file mode 100644 index 05636f7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_file_operations.py +++ /dev/null @@ -1,1665 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. 
import models - - -class FileOperations(object): - """FileOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file". - :ivar x_ms_copy_action: . Constant value: "abort". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_type = "file" - self.x_ms_copy_action = "abort" - - def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): - """Creates a new file or replaces a file. Note it only initializes the - file with no content. - - :param file_content_length: Specifies the maximum size for the file, - up to 1 TB. - :type file_content_length: long - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not 
None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, cls=None, **kwargs): - """Reads or downloads a file from the system, including its metadata and - properties. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and - specified together with the Range header, the service returns the MD5 - hash for the range, as long as the range is less than or equal to 4 MB - in size. 
- :type range_get_content_md5: bool - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 
'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', 
response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """Returns all user-defined metadata, standard HTTP properties, and system - properties for the file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, 
response.headers.get('x-ms-copy-status')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def delete(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): - """Sets HTTP headers on the file. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If - the specified byte value is less than the current size of the file, - then all ranges above the specified byte value are cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = 
self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, cls=None, **kwargs): - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def release_lease(self, lease_id, timeout=None, request_id=None, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, cls=None, **kwargs): - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the - start and end of the range must be specified. For an update operation, - the range can be up to 4 MB in size. For a clear operation, the range - can be up to the value of the file's full size. The File service - accepts only a single byte range for the Range and 'x-ms-range' - headers, and the byte range must be specified in the following format: - bytes=startByte-endByte. 
- :type range: str - :param file_range_write: Specify one of the following options: - - Update: Writes the bytes specified by the request body into the - specified range. The Range and Content-Length headers must match to - perform the update. - Clear: Clears the specified range and releases - the space used in storage for that range. To clear a range, set the - Content-Length header to zero, and set the Range header to a value - that indicates the range to clear, up to maximum file size. Possible - values include: 'update', 'clear' - :type file_range_write: str or - ~azure.storage.fileshare.models.FileRangeWriteType - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param optionalbody: Initial data. - :type optionalbody: Generator - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param content_md5: An MD5 hash of the content. This hash is used to - verify the integrity of the data during transport. When the - Content-MD5 header is specified, the File service compares the hash of - the content that has arrived with the header value that was sent. If - the two hashes do not match, the operation will fail with error code - 400 (Bad Request). - :type content_md5: bytearray - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "range" - - # Construct URL - url = self.upload_range.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """Upload a range of bytes to a file where the contents are read from a - URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the - range of bytes that must be read from the copy source. 
- :type source_content_crc64: bytearray - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - source_if_none_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "range" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') - if source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def get_range_list(self, sharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, cls=None, **kwargs): - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, - inclusively. - :type range: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.Range] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "rangelist" - - # Construct URL - url = self.get_range_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[Range]', response) 
- header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, cls=None, **kwargs): - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param copy_file_smb_info: Additional parameters for the operation - :type copy_file_smb_info: - ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_permission_copy_mode = None - if copy_file_smb_info is not None: - file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - ignore_read_only = None - if copy_file_smb_info is not None: - ignore_read_only = copy_file_smb_info.ignore_read_only - file_attributes = None - if copy_file_smb_info is not None: - file_attributes = copy_file_smb_info.file_attributes - file_creation_time = None - if copy_file_smb_info is not None: - file_creation_time = copy_file_smb_info.file_creation_time - file_last_write_time = None - if copy_file_smb_info is not None: - file_last_write_time = copy_file_smb_info.file_last_write_time - set_archive_attribute = None - if copy_file_smb_info is not None: - set_archive_attribute = copy_file_smb_info.set_archive_attribute - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType') - if ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') - if file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - if file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - if file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", 
set_archive_attribute, 'bool') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """Aborts a pending Copy File operation, and leaves a destination file - with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, cls=None, **kwargs): - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, cls=None, **kwargs): - """Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterix (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. 
- :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_share_operations.py deleted file mode 100644 index 4b53be8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_share_operations.py +++ /dev/null @@ -1,825 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ShareOperations(object): - """ShareOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "share". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "share" - - def create(self, timeout=None, metadata=None, quota=None, cls=None, **kwargs): - """Creates a new share under the specified account. If the share with the - same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - 
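# A minimal sketch of the optional `cls` response-callback that the generated
# operations in this module accept: on success each operation calls
# cls(response, deserialized, response_headers), so passing a small callable
# is enough to surface the parsed headers (ETag, Last-Modified,
# x-ms-request-id, ...) to the caller. `share_ops` below is an assumed,
# already-constructed ShareOperations instance, not something defined here.

def _return_headers(raw_response, deserialized, response_headers):
    # The third argument is the header dict built by the operation; returning
    # it makes it the operation's return value.
    return response_headers

# headers = share_ops.create(timeout=30, cls=_return_headers)
# etag = headers['ETag']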
create.metadata = {'url': '/{shareName}'} - - def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs): - """Returns all user-defined metadata and system properties for the - specified share or share snapshot. The data returned does not include - the share's list of files. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), - 'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')), - 'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')), - 'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')), - 'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}'} - - def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, cls=None, **kwargs): - """Operation marks the specified share or share 
snapshot for deletion. The - share or share snapshot and any files contained within it are later - deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the - base share and all of its snapshots. Possible values include: - 'include' - :type delete_snapshots: str or - ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}'} - - def create_snapshot(self, timeout=None, metadata=None, cls=None, **kwargs): - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{shareName}'} - - def create_permission(self, share_permission, timeout=None, cls=None, **kwargs): - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the - share level. - :type share_permission: - ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.create_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_permission.metadata = {'url': '/{shareName}'} - - def get_permission(self, file_permission_key, timeout=None, cls=None, **kwargs): - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the - directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: SharePermission or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.get_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SharePermission', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} - - def set_quota(self, timeout=None, quota=None, cls=None, **kwargs): - """Sets quota for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. 
- :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_quota.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_quota.metadata = {'url': '/{shareName}'} - - def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs): - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}'} - - def get_access_policy(self, timeout=None, cls=None, **kwargs): - """Returns information about stored access policies specified on the - share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} - - def set_access_policy(self, share_acl=None, timeout=None, cls=None, **kwargs): - """Sets a stored access policy for use with shared access signatures. - - :param share_acl: The ACL for the share. - :type share_acl: - list[~azure.storage.fileshare.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{shareName}'} - - def get_statistics(self, timeout=None, cls=None, **kwargs): - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ShareStats or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ShareStats', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} - - def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, cls=None, **kwargs): - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param deleted_share_name: Specifies the name of the - preivously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the - preivously-deleted share. 
- :type deleted_share_version: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.restore.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - restore.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/version.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/version.py deleted file mode 100644 index be04589..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/version.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
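# A minimal sketch of driving the share restore ("undelete") operation defined
# above, assuming `share_ops` is an already-constructed ShareOperations
# instance and that the soft-deleted share's name and version were obtained
# from a prior share listing that included deleted shares.

def _restore_deleted_share(share_ops, name, version):
    # name/version identify the soft-deleted share and are sent by the
    # operation as the x-ms-deleted-share-name and x-ms-deleted-share-version
    # request headers.
    share_ops.restore(
        timeout=30,
        deleted_share_name=name,
        deleted_share_version=version,
    )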
-# -------------------------------------------------------------------------- - -VERSION = "2019-12-12" - diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_lease.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_lease.py deleted file mode 100644 index f67264a..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_lease.py +++ /dev/null @@ -1,170 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated.models import StorageErrorException - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - - -class ShareLeaseClient(object): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file to lease. - :type client: ~azure.storage.fileshare.ShareFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (ShareFileClient, Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'file_name'): - self._client = client._client.file # type: ignore # pylint: disable=protected-access - else: - raise TypeError("Lease must use ShareFileClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, **kwargs): - # type: (int, **Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file for write and delete operations. If the file does not have an active lease, - the File service creates a lease on the file. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file does not have an active lease, the File service creates a - lease on the file and returns a new lease ID. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - try: - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=-1, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the file. Releasing the lease allows another client to immediately acquire the lease - for the file as soon as the release is complete. - - - The lease may be released if the client lease id specified matches - that associated with the file. Releasing the lease allows another client - to immediately acquire the lease for the file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, **kwargs): - # type: (Optional[int], Any) -> int - """Force breaks the lease if the file has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
- :rtype: int - """ - try: - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_models.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_models.py deleted file mode 100644 index 2d348d6..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_models.py +++ /dev/null @@ -1,925 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.paging import PageIterator -from ._parser import _parse_datetime_from_str -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import StorageErrorException -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import DirectoryItem - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for files. - - All required parameters must be populated in order to send to Azure. - - :keyword str version: The version of Storage Analytics to configure. - :keyword bool enabled: Required. Indicates whether metrics are enabled for the - File service. - :keyword bool include_ap_is: Indicates whether metrics should generate summary - statistics for called API operations. - :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should - persist. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param bool enabled: Required. Indicates whether a retention policy is enabled - for the storage service. - :param int days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. 
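# A minimal sketch of composing the analytics models defined in this module
# (the import path below is the versioned module this diff removes, so it only
# resolves on releases that still ship it). RetentionPolicy(enabled=True)
# without `days` raises ValueError, per the __init__ that follows.

from azure.multiapi.storagev2.fileshare.v2019_12_12._models import (
    Metrics,
    RetentionPolicy,
)

retention = RetentionPolicy(enabled=True, days=7)  # keep analytics data for 7 days
minute_metrics = Metrics(enabled=True, include_apis=True, retention_policy=retention)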
- """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. 
- - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.fileshare.FileSasPermissions or - ~azure.storage.fileshare.ShareSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class LeaseProperties(DictMixin): - """File Lease Properties. - - :ivar str status: - The lease status of the file. Possible values: locked|unlocked - :ivar str state: - Lease state of the file. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """Used to store the content settings of a file. - - :param str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :param str content_language: - If the content_language has previously been set - for the file, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the file, that value is stored. 
-    :param str content_md5:
-        If the content_md5 has been set for the file, this response
-        header is stored so that the client can check for message content
-        integrity.
-    """
-
-    def __init__(
-            self, content_type=None, content_encoding=None,
-            content_language=None, content_disposition=None,
-            cache_control=None, content_md5=None, **kwargs):
-
-        self.content_type = content_type or kwargs.get('Content-Type')
-        self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
-        self.content_language = content_language or kwargs.get('Content-Language')
-        self.content_md5 = content_md5 or kwargs.get('Content-MD5')
-        self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
-        self.cache_control = cache_control or kwargs.get('Cache-Control')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        settings = cls()
-        settings.content_type = generated.properties.content_type or None
-        settings.content_encoding = generated.properties.content_encoding or None
-        settings.content_language = generated.properties.content_language or None
-        settings.content_md5 = generated.properties.content_md5 or None
-        settings.content_disposition = generated.properties.content_disposition or None
-        settings.cache_control = generated.properties.cache_control or None
-        return settings
-
-
-class ShareProperties(DictMixin):
-    """Share's properties class.
-
-    :ivar str name:
-        The name of the share.
-    :ivar ~datetime.datetime last_modified:
-        A datetime object representing the last time the share was modified.
-    :ivar str etag:
-        The ETag contains a value that you can use to perform operations
-        conditionally.
-    :ivar int quota:
-        The allocated quota.
-    :ivar dict metadata: A dict with name-value pairs to associate with the
-        share as metadata.
-    :ivar str snapshot:
-        Snapshot of the share.
-    :ivar bool deleted:
-        Whether this share has been deleted.
-        This is a service returned value, and it is only set when listing shares including deleted ones.
-    :ivar datetime deleted_time:
-        The time at which the deleted share was deleted.
-        This is a service returned value, and it is only set when listing shares including deleted ones.
-    :ivar str version:
-        The version of the deleted share.
-        This is a service returned value, and it is only set when listing shares including deleted ones.
-    :ivar int remaining_retention_days:
-        The number of remaining days for which the deleted share will be kept.
-        This is a service returned value, and it is only set when listing shares including deleted ones.
- """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.quota = kwargs.get('x-ms-share-quota') - self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') - self.metadata = kwargs.get('metadata') - self.snapshot = None - self.deleted = None - self.deleted_time = None - self.version = None - self.remaining_retention_days = None - self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') - self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') - self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.quota = generated.properties.quota - props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time - props.metadata = generated.metadata - props.snapshot = generated.snapshot - props.deleted = generated.deleted - props.deleted_time = generated.properties.deleted_time - props.version = generated.version - props.remaining_retention_days = generated.properties.remaining_retention_days - props.provisioned_egress_mbps = generated.properties.provisioned_egress_mbps - props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_mbps - props.provisioned_iops = generated.properties.provisioned_iops - return props - - -class SharePropertiesPaged(PageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - prefix=self.prefix, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class Handle(DictMixin): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :keyword str handle_id: Required. XSMB service handle ID - :keyword str path: Required. File or directory name including full path starting - from share root - :keyword str file_id: Required. FileId uniquely identifies the file or - directory. - :keyword str parent_id: ParentId uniquely identifies the parent directory of the - object. - :keyword str session_id: Required. SMB session ID in context of which the file - handle was opened - :keyword str client_ip: Required. Client IP that opened the handle - :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('handle_id') - self.path = kwargs.get('path') - self.file_id = kwargs.get('file_id') - self.parent_id = kwargs.get('parent_id') - self.session_id = kwargs.get('session_id') - self.client_ip = kwargs.get('client_ip') - self.open_time = kwargs.get('open_time') - self.last_reconnect_time = kwargs.get('last_reconnect_time') - - @classmethod - def _from_generated(cls, generated): - handle = cls() - handle.id = generated.handle_id - handle.path = generated.path - handle.file_id = generated.file_id - handle.parent_id = generated.parent_id - handle.session_id = generated.session_id - handle.client_ip = generated.client_ip - handle.open_time = generated.open_time - handle.last_reconnect_time = generated.last_reconnect_time - return handle - - -class HandlesPaged(PageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryProperties(DictMixin): - """Directory's properties class. - - :ivar str name: - The name of the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool server_encrypted: - Whether encryption is enabled. - :keyword dict metadata: A dict with name_value pairs to associate with the - directory as metadata. - :ivar change_time: Change time for the file. - :vartype change_time: str or ~datetime.datetime - :ivar creation_time: Creation time for the file. - :vartype creation_time: str or ~datetime.datetime - :ivar last_write_time: Last write time for the file. - :vartype last_write_time: str or ~datetime.datetime - :ivar file_attributes: - The file system attributes for files and directories. - :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :ivar permission_key: Key of the permission to be set for the - directory/file. - :vartype permission_key: str - :ivar file_id: Required. FileId uniquely identifies the file or - directory. - :vartype file_id: str - :ivar parent_id: ParentId uniquely identifies the parent directory of the - object. 
- :vartype parent_id: str - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.metadata = kwargs.get('metadata') - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.server_encrypted = generated.properties.server_encrypted - props.metadata = generated.metadata - return props - - -class DirectoryPropertiesPaged(PageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] - self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) - return self._response.next_marker or None, self.current_page - - -class FileProperties(DictMixin): - """File's properties class. - - :ivar str name: - The name of the file. - :ivar str path: - The path of the file. - :ivar str share: - The name of share. - :ivar str snapshot: - File snapshot. - :ivar int content_length: - Size of file in bytes. - :ivar dict metadata: A dict with name_value pairs to associate with the - file as metadata. - :ivar str file_type: - Type of the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - Size of file in bytes. - :ivar str content_range: - The range of bytes. - :ivar bool server_encrypted: - Whether encryption is enabled. - :ivar copy: - The copy properties. - :vartype copy: ~azure.storage.fileshare.CopyProperties - :ivar content_settings: - The content settings for the file. 
- :vartype content_settings: ~azure.storage.fileshare.ContentSettings - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.path = None - self.share = None - self.snapshot = None - self.content_length = kwargs.get('Content-Length') - self.metadata = kwargs.get('metadata') - self.file_type = kwargs.get('x-ms-type') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.content_length = generated.properties.content_length - props.metadata = generated.properties.metadata - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - return props - - -class CopyProperties(DictMixin): - """File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation. - :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source file to a destination file. 
-        The snapshot is copied such that only the differential changes between
-        the previously copied snapshot are transferred to the destination.
-    :ivar datetime destination_snapshot:
-        Included if the file is an incremental copy or an incremental copy snapshot,
-        if x-ms-copy-status is success. Snapshot time of the last successful
-        incremental copy snapshot for this file.
-    """
-
-    def __init__(self, **kwargs):
-        self.id = kwargs.get('x-ms-copy-id')
-        self.source = kwargs.get('x-ms-copy-source')
-        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
-        self.progress = kwargs.get('x-ms-copy-progress')
-        self.completion_time = kwargs.get('x-ms-copy-completion_time')
-        self.status_description = kwargs.get('x-ms-copy-status-description')
-        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
-        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
-
-    @classmethod
-    def _from_generated(cls, generated):
-        copy = cls()
-        copy.id = generated.properties.copy_id or None
-        copy.status = get_enum_value(generated.properties.copy_status) or None
-        copy.source = generated.properties.copy_source or None
-        copy.progress = generated.properties.copy_progress or None
-        copy.completion_time = generated.properties.copy_completion_time or None
-        copy.status_description = generated.properties.copy_status_description or None
-        copy.incremental_copy = generated.properties.incremental_copy or None
-        copy.destination_snapshot = generated.properties.destination_snapshot or None
-        return copy
-
-
-class FileSasPermissions(object):
-    """FileSasPermissions class to be used with
-    generating shared access signature operations.
-
-    :param bool read:
-        Read the content, properties, metadata. Use the file as the source of a copy
-        operation.
-    :param bool create:
-        Create a new file or copy a file to a new file.
-    :param bool write:
-        Create or write content, properties, metadata. Resize the file. Use the file
-        as the destination of a copy operation within the same account.
-    :param bool delete:
-        Delete the file.
-    """
-    def __init__(self, read=False, create=False, write=False, delete=False):
-        self.read = read
-        self.create = create
-        self.write = write
-        self.delete = delete
-        self._str = (('r' if self.read else '') +
-                     ('c' if self.create else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create a FileSasPermissions from a string.
-
-        To specify read, create, write, or delete permissions you need only to
-        include the first letter of the word in the string. E.g. For read and
-        create permissions, you would provide a string "rc".
-
-        :param str permission: The string which dictates the read, create,
-            write, or delete permissions
-        :return: A FileSasPermissions object
-        :rtype: ~azure.storage.fileshare.FileSasPermissions
-        """
-        p_read = 'r' in permission
-        p_create = 'c' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-
-        parsed = cls(p_read, p_create, p_write, p_delete)
-        parsed._str = permission  # pylint: disable = protected-access
-        return parsed
-
-
-class ShareSasPermissions(object):
-    """ShareSasPermissions class to be used with
-    generating shared access signature and access policy operations.
-
-    :param bool read:
-        Read the content, properties or metadata of any file in the share. Use any
-        file in the share as the source of a copy operation.
-    :param bool write:
-        For any file in the share, create or write content, properties or metadata.
-        Resize the file. Use the file as the destination of a copy operation within
-        the same account.
-        Note: You cannot grant permissions to read or write share properties or
-        metadata with a service SAS. Use an account SAS instead.
-    :param bool delete:
-        Delete any file in the share.
-        Note: You cannot grant permissions to delete a share with a service SAS. Use
-        an account SAS instead.
-    :param bool list:
-        List files and directories in the share.
-    """
-    def __init__(self, read=False, write=False, delete=False, list=False):  # pylint: disable=redefined-builtin
-        self.read = read
-        self.write = write
-        self.delete = delete
-        self.list = list
-        self._str = (('r' if self.read else '') +
-                     ('w' if self.write else '') +
-                     ('d' if self.delete else '') +
-                     ('l' if self.list else ''))
-
-    def __str__(self):
-        return self._str
-
-    @classmethod
-    def from_string(cls, permission):
-        """Create a ShareSasPermissions from a string.
-
-        To specify read, write, delete, or list permissions you need only to
-        include the first letter of the word in the string. E.g. For read and
-        write permissions, you would provide a string "rw".
-
-        :param str permission: The string which dictates the read, write,
-            delete, or list permissions
-        :return: A ShareSasPermissions object
-        :rtype: ~azure.storage.fileshare.ShareSasPermissions
-        """
-        p_read = 'r' in permission
-        p_write = 'w' in permission
-        p_delete = 'd' in permission
-        p_list = 'l' in permission
-
-        parsed = cls(p_read, p_write, p_delete, p_list)
-        parsed._str = permission  # pylint: disable = protected-access
-        return parsed
-
-
-class NTFSAttributes(object):
-    """
-    Valid set of attributes to set for a file or directory.
-    To set an attribute on a directory, 'Directory' should always be enabled, except when setting 'None' on a directory.
- - :ivar bool read_only: - Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE - :ivar bool hidden: - Enable/disable 'Hidden' attribute for DIRECTORY or FILE - :ivar bool system: - Enable/disable 'System' attribute for DIRECTORY or FILE - :ivar bool none: - Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY - :ivar bool directory: - Enable/disable 'Directory' attribute for DIRECTORY - :ivar bool archive: - Enable/disable 'Archive' attribute for DIRECTORY or FILE - :ivar bool temporary: - Enable/disable 'Temporary' attribute for FILE - :ivar bool offline: - Enable/disable 'Offline' attribute for DIRECTORY or FILE - :ivar bool not_content_indexed: - Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE - :ivar bool no_scrub_data: - Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE - """ - def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False, - temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False): - - self.read_only = read_only - self.hidden = hidden - self.system = system - self.none = none - self.directory = directory - self.archive = archive - self.temporary = temporary - self.offline = offline - self.not_content_indexed = not_content_indexed - self.no_scrub_data = no_scrub_data - self._str = (('ReadOnly|' if self.read_only else '') + - ('Hidden|' if self.hidden else '') + - ('System|' if self.system else '') + - ('None|' if self.none else '') + - ('Directory|' if self.directory else '') + - ('Archive|' if self.archive else '') + - ('Temporary|' if self.temporary else '') + - ('Offline|' if self.offline else '') + - ('NotContentIndexed|' if self.not_content_indexed else '') + - ('NoScrubData|' if self.no_scrub_data else '')) - - def __str__(self): - concatenated_params = self._str - return concatenated_params.strip('|') - - @classmethod - def from_string(cls, string): - """Create a NTFSAttributes from a string. - - To specify permissions you can pass in a string with the - desired permissions, e.g. "ReadOnly|Hidden|System" - - :param str string: The string which dictates the permissions. - :return: A NTFSAttributes object - :rtype: ~azure.storage.fileshare.NTFSAttributes - """ - read_only = "ReadOnly" in string - hidden = "Hidden" in string - system = "System" in string - none = "None" in string - directory = "Directory" in string - archive = "Archive" in string - temporary = "Temporary" in string - offline = "Offline" in string - not_content_indexed = "NotContentIndexed" in string - no_scrub_data = "NoScrubData" in string - - parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed, - no_scrub_data) - parsed._str = string # pylint: disable = protected-access - return parsed - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. 
- """ - return { - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - } diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_parser.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_parser.py deleted file mode 100644 index db7cab5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_parser.py +++ /dev/null @@ -1,42 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import datetime, timedelta - -_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time' -_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else' \ - 'please use file_permission_key' - - -def _get_file_permission(file_permission, file_permission_key, default_permission): - # if file_permission and file_permission_key are both empty, then use the default_permission - # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used - if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024: - raise ValueError(_FILE_PERMISSION_TOO_LONG) - - if not file_permission: - if not file_permission_key: - return default_permission - return None - - if not file_permission_key: - return file_permission - - raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS) - - -def _parse_datetime_from_str(string_datetime): - if not string_datetime: - return None - dt, _, us = string_datetime.partition(".") - dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") - us = int(us[:-2]) # microseconds - datetime_obj = dt + timedelta(microseconds=us) - return datetime_obj - - -def _datetime_to_str(datetime_obj): - return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z' diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_serialize.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_serialize.py deleted file mode 100644 index c075511..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_serialize.py +++ /dev/null @@ -1,111 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from azure.core import MatchConditions - -from ._parser import _datetime_to_str, _get_file_permission -from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-12-12' -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - # TODO: extract this method to shared folder also add some comments, so that share, datalake and blob can use it. 
- if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if etag_param in kwargs: - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - -def get_access_conditions(lease): - # type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_smb_properties(kwargs): - # type: (Dict[str, Any]) -> Dict[str, Any] - ignore_read_only = kwargs.pop('ignore_read_only', None) - set_archive_attribute = kwargs.pop('set_archive_attribute', None) - file_permission = kwargs.pop('file_permission', None) - file_permission_key = kwargs.pop('permission_key', None) - file_attributes = kwargs.pop('file_attributes', None) - file_creation_time = kwargs.pop('file_creation_time', None) or "" - file_last_write_time = kwargs.pop('file_last_write_time', None) or "" - - file_permission_copy_mode = None - file_permission = _get_file_permission(file_permission, file_permission_key, None) - - if file_permission: - if file_permission.lower() == "source": - file_permission = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - elif file_permission_key: - if file_permission_key.lower() == "source": - file_permission_key = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - return { - 'file_permission': file_permission, - 'file_permission_key': file_permission_key, - 'copy_file_smb_info': CopyFileSmbInfo( - file_permission_copy_mode=file_permission_copy_mode, - ignore_read_only=ignore_read_only, - file_attributes=file_attributes, - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - set_archive_attribute=set_archive_attribute - ) - - } - -def get_api_version(kwargs, default): - # type: (Dict[str, Any]) -> str - api_version = kwargs.pop('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) - return api_version or default diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_client.py deleted file mode 100644 index 765ec5e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_client.py +++ /dev/null @@ -1,705 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import ( - StorageErrorException, - SignedIdentifier, - DeleteSnapshotsOptionType, - SharePermission) -from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission -from ._serialize import get_api_version -from ._directory_client import ShareDirectoryClient -from ._file_client import ShareFileClient - -if TYPE_CHECKING: - from ._models import ShareProperties, AccessPolicy - - -class ShareClient(StorageAccountHostsMixin): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. 
- """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - @classmethod - def from_share_url(cls, share_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """ - :param str share_url: The full URI to the share. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - """ - try: - if not share_url.lower().startswith('http'): - share_url = "https://" + share_url - except AttributeError: - raise ValueError("Share URL must be a string.") - parsed_url = urlparse(share_url.rstrip('/')) - if not (parsed_url.path and parsed_url.netloc): - raise ValueError("Invalid URL: {}".format(share_url)) - - share_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(share_path) > 1: - account_path = "/" + "/".join(share_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - - share_name = unquote(share_path[-1]) - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - if not share_name: - raise ValueError("Invalid URL. 
Please provide a URL with a valid share name") - return cls(account_url, share_name, path_snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """Create ShareClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str snapshot: - The optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_client_from_conn_string] - :end-before: [END create_share_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Gets the share client from connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. 
- :rtype: ~azure.storage.fileshare.ShareFileClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode) - - @distributed_trace - def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 8 - :caption: Creates a file share. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 12 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. 
- - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 12 - :caption: Deletes the share and any snapshots. - """ - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - delete_snapshots=delete_include, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 12 - :caption: Gets the share properties. - """ - timeout = kwargs.pop('timeout', None) - try: - props = self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace - def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 12 - :caption: Sets the share quota. - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.set_quota( # type: ignore - timeout=timeout, - quota=quota, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 12 - :caption: Sets the share metadata. - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - try: - return self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The approximate size of the data (in bytes) stored on the share. 
- :rtype: int - """ - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.share.get_statistics( - timeout=timeout, - **kwargs) - return stats.share_usage_bytes # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 12 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @staticmethod - def _create_permission_for_share_options(file_permission, # type: str - **kwargs): - options = { - 'share_permission': SharePermission(permission=file_permission), - 'cls': deserialize_permission_key, - 'timeout': kwargs.pop('timeout', None), - } - options.update(kwargs) - return options - - @distributed_trace - def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return self._client.share.create_permission(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace - def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - directory = self.get_directory_client(directory_name) - directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_service_client.py deleted file mode 100644 index 549e09f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_share_service_client.py +++ /dev/null @@ -1,409 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, List, - TYPE_CHECKING -) -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.response_handlers import process_storage_error -from ._generated import AzureFileStorage -from ._generated.models import StorageErrorException, StorageServiceProperties -from ._generated.version import VERSION -from ._share_client import ShareClient -from ._serialize import get_api_version -from ._models import ( - SharePropertiesPaged, - service_properties_deserialize, -) - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ( - ShareProperties, - Metrics, - CorsRule, - ) - - -class ShareServiceClient(StorageAccountHostsMixin): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. 
- For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File Share service.") - - _, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ShareServiceClient - """Create ShareServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A File Share service client. - :rtype: ~azure.storage.fileshare.ShareServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client_from_conn_string] - :end-before: [END create_share_service_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Create the share service client with connection string. 
- """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 8 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. - """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors - ) - try: - self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ShareProperties] - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. 
- :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 12 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace - def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 8 - :caption: Create a share in the file share service. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace - def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 12 - :caption: Delete a share in the file share service. 
- """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace - def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - """ - share = self.get_share_client(deleted_share_name) - - try: - share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except StorageErrorException as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. - """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, - _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/authentication.py deleted file mode 100644 index b11dc57..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/authentication.py +++ /dev/null @@ -1,140 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client.py deleted file mode 100644 index 14deea6..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client.py +++ /dev/null @@ -1,437 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, - Optional, - Any, - Iterable, - Dict, - List, - Type, - Tuple, - TYPE_CHECKING, -) -import logging - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except StorageErrorException as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} - except KeyError: - credential = conn_settings.get("SharedAccessSignature") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DefaultEndpointsProtocol"], - conn_settings["AccountName"], - service, - conn_settings["EndpointSuffix"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["AccountName"], service, conn_settings["EndpointSuffix"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - 
conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client_async.py deleted file mode 100644 index d252ad0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/base_client_async.py +++ /dev/null @@ -1,179 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except StorageErrorException as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/constants.py deleted file mode 100644 index 7fb05b5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/constants.py +++ /dev/null @@ -1,26 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys -from .._generated.version import VERSION - - -X_MS_VERSION = VERSION - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. 
- ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/models.py deleted file mode 100644 index aa31bfb..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/models.py +++ /dev/null @@ -1,468 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
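The removed helpers above implement client-side envelope encryption: a fresh AES-256 content key and 16-byte IV are generated per message, the payload is PKCS7-padded and encrypted with AES-CBC, and the key-encryption-key-wrapped content key travels in a JSON envelope next to the ciphertext. The standalone sketch below reproduces that flow so the pattern can be read in one place; the function names, the flattened envelope layout, and the unwrapped storage of the content key are illustrative simplifications, not part of the package, so do not use this sketch for real key protection.

# Illustrative sketch only: mirrors the AES-256 / CBC / PKCS7 flow of the removed
# helpers, but flattens the metadata and skips real key wrapping.
import base64
import json
import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.padding import PKCS7


def toy_encrypt(message):
    cek = os.urandom(32)                 # AES-256 content encryption key
    iv = os.urandom(16)                  # one AES block
    padder = PKCS7(128).padder()
    padded = padder.update(message.encode('utf-8')) + padder.finalize()
    encryptor = Cipher(AES(cek), CBC(iv), default_backend()).encryptor()
    ciphertext = encryptor.update(padded) + encryptor.finalize()
    return json.dumps({
        'EncryptedMessageContents': base64.b64encode(ciphertext).decode('utf-8'),
        'ContentEncryptionKey': base64.b64encode(cek).decode('utf-8'),  # wrap with a KEK in real use
        'ContentEncryptionIV': base64.b64encode(iv).decode('utf-8'),
    })


def toy_decrypt(envelope_json):
    envelope = json.loads(envelope_json)
    cek = base64.b64decode(envelope['ContentEncryptionKey'])
    iv = base64.b64decode(envelope['ContentEncryptionIV'])
    data = base64.b64decode(envelope['EncryptedMessageContents'])
    decryptor = Cipher(AES(cek), CBC(iv), default_backend()).decryptor()
    padded = decryptor.update(data) + decryptor.finalize()
    unpadder = PKCS7(128).unpadder()
    return (unpadder.update(padded) + unpadder.finalize()).decode('utf-8')


assert toy_decrypt(toy_encrypt(u'hello queue')) == u'hello queue'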
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.fileshare.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.fileshare.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - parsed._str = permission # pylint: disable = protected-access - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. 
- :return: A Services object - :rtype: ~azure.storage.fileshare.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies.py deleted file mode 100644 index c9bc798..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
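The ResourceTypes, AccountSasPermissions and Services classes defined in the removed models.py above all encode their flags as one-letter strings and parse them back with from_string(). A short sketch of that round trip follows; the import path is the module deleted in this change, so it only resolves on pre-1.0.0 installs of this package (the same classes also exist under the public azure.storage.fileshare namespace).

# Assumes a pre-1.0.0 install where the removed v2019_12_12 module is still importable.
from azure.multiapi.storagev2.fileshare.v2019_12_12._shared.models import (
    AccountSasPermissions,
    ResourceTypes,
    Services,
)

perms = AccountSasPermissions(read=True, write=True, list=True)
print(str(perms))                                      # 'rwl', one letter per enabled flag

# from_string() is the inverse; only the first letter of each word is significant.
parsed = AccountSasPermissions.from_string('rwl')
assert parsed.read and parsed.write and parsed.list and not parsed.delete

print(str(ResourceTypes(service=True, object=True)))   # 'so'
print(str(Services(blob=True, fileshare=True)))        # 'bf'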
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
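The docstrings above spell out the back-off formulas: ExponentialRetry sleeps initial_backoff on the first retry and initial_backoff + increment_base**count on later ones (plus or minus random jitter), while LinearRetry sleeps a constant backoff (plus jitter). The sketch below evaluates the first few attempts with jitter disabled so the numbers are visible; the import path is the synchronous policies module removed in this change and therefore only applies to pre-1.0.0 installs, and get_backoff_time() is fed a minimal settings dict because 'count' is the only key it reads.

# Pre-1.0.0 installs only: this module is deleted by the change above.
from azure.multiapi.storagev2.fileshare.v2019_12_12._shared.policies import (
    ExponentialRetry,
    LinearRetry,
)

exp = ExponentialRetry(initial_backoff=15, increment_base=3, random_jitter_range=0)
lin = LinearRetry(backoff=15, random_jitter_range=0)

for count in range(4):
    settings = {'count': count}      # the only key get_backoff_time() inspects
    print(count, exp.get_backoff_time(settings), lin.get_backoff_time(settings))

# With jitter disabled the exponential policy yields 15, 18, 24 and 42 seconds
# for counts 0-3, while the linear policy yields 15 seconds every time.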
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/request_handlers.py deleted file mode 100644 index 4f15b65..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/request_handlers.py +++ /dev/null @@ -1,147 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/response_handlers.py deleted file mode 100644 index ac526e5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/response_handlers.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.location_mode, deserialized - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message 
+= "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/shared_access_signature.py deleted file mode 100644 index 367c655..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/shared_access_signature.py +++ /dev/null @@ -1,209 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. 
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - 
get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads.py deleted file mode 100644 index abf3fb2..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads.py +++ /dev/null @@ -1,550 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
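The SAS helpers above accumulate query parameters in a dictionary and only join them into a token at the end. A rough, self-contained sketch of that final step, assuming Python 3's urllib and placeholder parameter values::

    from urllib.parse import quote

    def build_sas_token(query_dict):
        # Mirrors _SharedAccessHelper.get_token: URL-encode each value and join with '&',
        # skipping parameters that were never set.
        return '&'.join('{0}={1}'.format(name, quote(value))
                        for name, value in query_dict.items() if value is not None)

    token = build_sas_token({
        'sv': '2019-12-12',            # signed version
        'ss': 'f',                     # signed services (file)
        'srt': 'sco',                  # signed resource types
        'sp': 'rl',                    # signed permission
        'se': '2020-01-01T00:00:00Z',  # signed expiry
        'sig': None,                   # the signature is added last by add_account_signature
    })
    # -> 'sv=2019-12-12&ss=f&srt=sco&sp=rl&se=2020-01-01T00%3A00%3A00Z'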
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - 
self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, block_id, block_stream): - try: - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. 
- if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads_async.py deleted file mode 100644 index f6a8725..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared/uploads_async.py +++ /dev/null @@ -1,351 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
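Both the threaded uploader above and the asyncio variant that follows rely on the same sliding-window pattern: keep at most max_concurrency chunk uploads in flight and start a new one each time one finishes. A stripped-down sketch of the threaded version, using a hypothetical upload_chunk callable rather than the package's API::

    from concurrent import futures
    from itertools import islice

    def upload_in_parallel(upload_chunk, chunks, max_concurrency=4):
        results = []
        with futures.ThreadPoolExecutor(max_concurrency) as executor:
            pending = iter(chunks)
            # Prime the window with up to max_concurrency uploads.
            running = {executor.submit(upload_chunk, c) for c in islice(pending, max_concurrency)}
            while running:
                # Wait for at least one upload to finish, then top the window back up.
                done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
                results.extend(f.result() for f in done)
                for _ in done:
                    try:
                        running.add(executor.submit(upload_chunk, next(pending)))
                    except StopIteration:
                        break
        return results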
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, block_id, block_stream): - range_id = await self._upload_substream_block(block_id, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, block_id, block_stream): - try: - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - 
length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared_access_signature.py deleted file mode 100644 index 20dad95..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_shared_access_signature.py +++ /dev/null @@ -1,491 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, List, TYPE_CHECKING -) - -from ._shared import sign_string -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants -from ._shared.parser import _str - -if TYPE_CHECKING: - from datetime import datetime - from .import ( - ResourceTypes, - AccountSasPermissions, - ShareSasPermissions, - FileSasPermissions - ) - -class FileSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating file and share access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, policy_id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _str(directory_name) if directory_name is not None else None - resource_path += '/' + _str(file_name) if file_name is not None else None - - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, share_name) - - return sas.get_token() - - -class _FileSharedAccessHelper(_SharedAccessHelper): - - def add_resource_signature(self, account_name, account_key, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/file/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for the file service. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param ~azure.storage.fileshare.AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. 
- :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 8 - :caption: Generate a sas token. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(fileshare=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_share_sas( - account_name, # type: str - share_name, # type: str - account_key, # type: str - permission=None, # type: Optional[Union[ShareSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for a share. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. 
- :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - return sas.generate_share( - share_name=share_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_file_sas( - account_name, # type: str - share_name, # type: str - file_path, # type: List[str] - account_key, # type: str - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param file_path: - The file path represented as a list of path segments, including the file name. - :type file_path: List[str] - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. 
- If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - if len(file_path) > 1: - dir_path = '/'.join(file_path[:-1]) - else: - dir_path = None # type: ignore - return sas.generate_file( # type: ignore - share_name=share_name, - directory_name=dir_path, - file_name=file_path[-1], - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_version.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/_version.py deleted file mode 100644 index dc78818..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.2.0" diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/__init__.py deleted file mode 100644 index 73393b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
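For context on the SAS helpers deleted above (generate_account_sas, generate_share_sas, generate_file_sas), here is a minimal usage sketch. It assumes these names and FileSasPermissions are importable from the vendored fileshare package shown in this diff, as they are in the upstream azure-storage-file-share SDK; the account name, key, share, and file path are placeholders, not values from this repository.

# Hedged illustration of the removed SAS helpers; the import path assumes the
# vendored package re-exports these names, and all credentials are placeholders.
from datetime import datetime, timedelta

from azure.multiapi.storagev2.fileshare.v2019_12_12 import (  # assumed re-exports
    FileSasPermissions,
    generate_file_sas,
)

# Build a read-only SAS for a single file, valid for one hour.
sas_token = generate_file_sas(
    account_name="myaccount",            # placeholder
    share_name="myshare",                # placeholder
    file_path=["mydir", "myfile.txt"],   # path segments, file name last
    account_key="<account-key>",         # placeholder
    permission=FileSasPermissions(read=True),
    expiry=datetime.utcnow() + timedelta(hours=1),
)

# The token can be appended to the file URL or passed as the credential of a client.
print(sas_token)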
-# -------------------------------------------------------------------------- - -from ._file_client_async import ShareFileClient -from ._directory_client_async import ShareDirectoryClient -from ._share_client_async import ShareClient -from ._share_service_client_async import ShareServiceClient -from ._lease_async import ShareLeaseClient - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', -] diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_directory_client_async.py deleted file mode 100644 index 29b6396..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_directory_client_async.py +++ /dev/null @@ -1,593 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _get_file_permission, _datetime_to_str -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import StorageErrorException -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_directory_properties -from .._serialize import get_api_version -from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase -from ._file_client_async import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes - from .._generated.models import HandleItem - - -class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. 
This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareDirectoryClient, self).__init__( - account_url, - share_name=share_name, - directory_path=directory_path, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = loop - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param str file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 16 - :caption: Gets the subdirectory client. 
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - @distributed_trace_async - async def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 16 - :caption: Creates a directory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 16 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - await self._client.directory.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], Any) -> AsyncItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 16 - :caption: List directories and files. 
- """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> AsyncItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace_async - async def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace_async - async def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. 
- :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 16 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace_async - async def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 16 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace_async - async def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 16 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace_async - async def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 16 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_download_async.py deleted file mode 100644 index c0db16d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_download_async.py +++ /dev/null @@ -1,467 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
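The async ShareDirectoryClient deleted above is typically driven as in the sketch below, which uses only methods defined in the removed module (create_directory, upload_file, list_directories_and_files, delete_file). The account URL, credential, and names are placeholders and not taken from this repository.

# Hedged usage sketch for the removed async ShareDirectoryClient.
import asyncio

from azure.multiapi.storagev2.fileshare.v2019_12_12.aio import ShareDirectoryClient


async def main():
    directory = ShareDirectoryClient(
        account_url="https://myaccount.file.core.windows.net",  # placeholder
        share_name="myshare",                                    # placeholder
        directory_path="mydir",                                  # placeholder
        credential="<sas-token-or-account-key>",                 # placeholder
    )
    await directory.create_directory()
    await directory.upload_file("hello.txt", b"hello world")

    # list_directories_and_files returns an AsyncItemPaged of dict-like properties.
    async for item in directory.list_directories_and_files():
        print(item["name"])

    await directory.delete_file("hello.txt")


asyncio.run(main())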
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from azure.core.exceptions import HttpResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in file download stream.""" - - def __init__(self, size, content, downloader): - self.size = size 
-        self._current_content = content
-        self._iter_downloader = downloader
-        self._iter_chunks = None
-        self._complete = (size == 0)
-
-    def __len__(self):
-        return self.size
-
-    def __iter__(self):
-        raise TypeError("Async stream must be iterated asynchronously.")
-
-    def __aiter__(self):
-        return self
-
-    async def __anext__(self):
-        """Iterate through responses."""
-        if self._complete:
-            raise StopAsyncIteration("Download complete")
-        if not self._iter_downloader:
-            # If no iterator was supplied, the download completed with
-            # the initial GET, so we just return that data
-            self._complete = True
-            return self._current_content
-
-        if not self._iter_chunks:
-            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
-        else:
-            try:
-                chunk = next(self._iter_chunks)
-            except StopIteration:
-                raise StopAsyncIteration("Download complete")
-            self._current_content = await self._iter_downloader.yield_chunk(chunk)
-
-        return self._current_content
-
-
-class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
-    """A streaming object to download from Azure Storage.
-
-    :ivar str name:
-        The name of the file being downloaded.
-    :ivar str path:
-        The full path of the file.
-    :ivar str share:
-        The name of the share where the file is.
-    :ivar ~azure.storage.fileshare.FileProperties properties:
-        The properties of the file being downloaded. If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the file.
-    """
-
-    def __init__(
-        self,
-        client=None,
-        config=None,
-        start_range=None,
-        end_range=None,
-        validate_content=None,
-        encryption_options=None,
-        max_concurrency=1,
-        name=None,
-        path=None,
-        share=None,
-        encoding=None,
-        **kwargs
-    ):
-        self.name = name
-        self.path = path
-        self.share = share
-        self.properties = None
-        self.size = None
-
-        self._client = client
-        self._config = config
-        self._start_range = start_range
-        self._end_range = end_range
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        self._validate_content = validate_content
-        self._encryption_options = encryption_options or {}
-        self._request_options = kwargs
-        self._location_mode = None
-        self._download_complete = False
-        self._current_content = None
-        self._file_size = None
-        self._response = None
-
-        # The service only provides transactional MD5s for chunks under 4MB.
-        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
-        # chunk so a transactional MD5 can be retrieved.
- self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. 
-        if response.properties.size == self.size:
-            self._download_complete = True
-        return response
-
-    def chunks(self):
-        """Iterate over chunks in the download stream.
-
-        :rtype: Iterable[bytes]
-        """
-        if self.size == 0 or self._download_complete:
-            iter_downloader = None
-        else:
-            data_end = self._file_size
-            if self._end_range is not None:
-                # Use the length unless it is over the end of the file
-                data_end = min(self._file_size, self._end_range + 1)
-            iter_downloader = _AsyncChunkDownloader(
-                client=self._client,
-                total_size=self.size,
-                chunk_size=self._config.max_chunk_get_size,
-                current_progress=self._first_get_size,
-                start_range=self._initial_range[1] + 1,  # Start where the first download ended
-                end_range=data_end,
-                stream=None,
-                parallel=False,
-                validate_content=self._validate_content,
-                encryption_options=self._encryption_options,
-                use_location=self._location_mode,
-                **self._request_options)
-        return _AsyncChunkIterator(
-            size=self.size,
-            content=self._current_content,
-            downloader=iter_downloader)
-
-    async def readall(self):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-        :rtype: bytes or str
-        """
-        stream = BytesIO()
-        await self.readinto(stream)
-        data = stream.getvalue()
-        if self._encoding:
-            return data.decode(self._encoding)
-        return data
-
-    async def content_as_bytes(self, max_concurrency=1):
-        """Download the contents of this file.
-
-        This operation is blocking until all data is downloaded.
-
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :rtype: bytes
-        """
-        warnings.warn(
-            "content_as_bytes is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        return await self.readall()
-
-    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
-        """Download the contents of this file, and decode as text.
-
-        This operation is blocking until all data is downloaded.
-
-        :keyword int max_concurrency:
-            The number of parallel connections with which to download.
-        :param str encoding:
-            Text encoding to decode the downloaded bytes. Default is UTF-8.
-        :rtype: str
-        """
-        warnings.warn(
-            "content_as_text is deprecated, use readall instead",
-            DeprecationWarning
-        )
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        return await self.readall()
-
-    async def readinto(self, stream):
-        """Download the contents of this file to a stream.
-
-        :param stream:
-            The stream to download to. This can be an open file-handle,
-            or any writable stream. The stream must be seekable if the download
-            uses more than one parallel connection.
-        :returns: The number of bytes read.
-        :rtype: int
-        """
-        # the stream must be seekable if parallel download is required
-        parallel = self._max_concurrency > 1
-        if parallel:
-            error_message = "Target stream handle must be seekable."
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_file_client_async.py deleted file mode 100644 index 3d48fdc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_file_client_async.py +++ /dev/null @@ -1,1165 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
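The streaming downloader removed above is normally obtained from a file client rather than constructed directly. The sketch below assumes, as in the upstream azure-storage-file-share SDK, that ShareFileClient.download_file returns this StorageStreamDownloader (that method is not shown in this diff); the account URL, credential, and paths are placeholders.

# Hedged sketch of consuming the removed async StorageStreamDownloader.
import asyncio

from azure.multiapi.storagev2.fileshare.v2019_12_12.aio import ShareFileClient


async def main():
    file_client = ShareFileClient(
        account_url="https://myaccount.file.core.windows.net",  # placeholder
        share_name="myshare",                                    # placeholder
        file_path="mydir/myfile.txt",                            # placeholder
        credential="<sas-token-or-account-key>",                 # placeholder
    )

    # Read the whole file into memory at once...
    downloader = await file_client.download_file()  # assumed to return StorageStreamDownloader
    data = await downloader.readall()
    print(len(data))

    # ...or stream it chunk by chunk to bound memory use.
    downloader = await file_client.download_file()
    async for chunk in downloader.chunks():
        print(len(chunk))


asyncio.run(main())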
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method -import functools -import time -from io import BytesIO -from typing import Optional, Union, IO, List, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.async_paging import AsyncItemPaged - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _datetime_to_str, _get_file_permission -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import StorageErrorException, FileHTTPHeaders -from .._shared.policies_async import ExponentialRetry -from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.request_handlers import add_metadata_headers, get_length -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_file_properties, deserialize_file_stream -from .._serialize import get_access_conditions, get_smb_properties, get_api_version -from .._file_client import ShareFileClient as ShareFileClientBase -from ._models import HandlesPaged -from ._lease_async import ShareLeaseClient -from ._download_async import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes - from .._generated.models import HandleItem - - -async def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs -): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = await client.create_file( - size, content_settings=content_settings, metadata=metadata, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - timeout=timeout, - **kwargs - ) - if size == 0: - return response - - responses = await upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except StorageErrorException as error: - process_storage_error(error) - - -class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. - :param str snapshot: - An optional file snapshot on which to operate. 
This can be the snapshot ID string
-        or the response returned from :func:`ShareClient.create_snapshot`.
-    :param credential:
-        The credential with which to authenticate. This is optional if the
-        account URL already has a SAS token. The value can be a SAS token string or an account
-        shared access key.
-    :keyword str api_version:
-        The Storage API version to use for requests. Default value is '2019-07-07'.
-        Setting to an older version may result in reduced feature compatibility.
-
-        .. versionadded:: 12.1.0
-
-    :keyword str secondary_hostname:
-        The hostname of the secondary endpoint.
-    :keyword loop:
-        The event loop to run the asynchronous tasks.
-    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
-    """
-
-    def __init__(  # type: ignore
-            self,
-            account_url,  # type: str
-            share_name,  # type: str
-            file_path,  # type: str
-            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
-            credential=None,  # type: Optional[Any]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> None
-        kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
-        loop = kwargs.pop('loop', None)
-        super(ShareFileClient, self).__init__(
-            account_url, share_name=share_name, file_path=file_path, snapshot=snapshot,
-            credential=credential, loop=loop, **kwargs
-        )
-        self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop)
-        self._client._config.version = get_api_version(kwargs, VERSION)  # pylint: disable=protected-access
-        self._loop = loop
-
-    @distributed_trace_async
-    async def acquire_lease(self, lease_id=None, **kwargs):
-        # type: (Optional[str], **Any) -> ShareLeaseClient
-        """Requests a new lease.
-
-        If the file does not have an active lease, the File
-        Service creates a lease on the file and returns a new lease.
-
-        :param str lease_id:
-            Proposed lease ID, in a GUID string format. The File Service
-            returns 400 (Invalid request) if the proposed lease ID is not
-            in the correct format.
-        :keyword int timeout:
-            The timeout parameter is expressed in seconds.
-        :returns: A ShareLeaseClient object.
-        :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient
-
-        .. admonition:: Example:
-
-            .. literalinclude:: ../samples/blob_samples_common.py
-                :start-after: [START acquire_lease_on_blob]
-                :end-before: [END acquire_lease_on_blob]
-                :language: python
-                :dedent: 8
-                :caption: Acquiring a lease on a blob.
-        """
-        lease = ShareLeaseClient(self, lease_id=lease_id)  # type: ignore
-        await lease.acquire(**kwargs)
-        return lease
-
-    @distributed_trace_async
-    async def create_file(  # type: ignore
-            self,
-            size,  # type: int
-            file_attributes="none",  # type: Union[str, NTFSAttributes]
-            file_creation_time="now",  # type: Union[str, datetime]
-            file_last_write_time="now",  # type: Union[str, datetime]
-            file_permission=None,  # type: Optional[str]
-            permission_key=None,  # type: Optional[str]
-            **kwargs  # type: Any
-        ):
-        # type: (...) -> Dict[str, Any]
-        """Creates a new file.
-
-        Note that it only initializes the file with no content.
-
-        :param int size: Specifies the maximum size for the file,
-            up to 1 TB.
-        :param file_attributes:
-            The file system attributes for files and directories.
-            If not set, the default value would be "None" and the attributes will be set to "Archive".
-            Here is an example for when the var type is str: 'Temporary|Archive'.
-            file_attributes value is not case sensitive.
- :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 16 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return await self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword str encoding: - Defaults to UTF-8. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 16 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, "read"): - stream = data - elif hasattr(data, "__iter__"): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return await _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs - ) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. 
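
# Usage sketch (illustrative; not part of the deleted sources): upload_file()
# accepts text, bytes, or a readable stream and chunks the upload according to
# max_range_size. Paths and credentials are placeholders.
import asyncio
from azure.storage.fileshare.aio import ShareFileClient

async def upload_local_file(path="report.txt"):
    file_client = ShareFileClient(
        "https://myaccount.file.core.windows.net",
        share_name="myshare", file_path="mydir/report.txt",
        credential="<sas-token>")
    with open(path, "rb") as source:
        await file_client.upload_file(
            source,
            metadata={"uploaded-by": "sketch"},
            validate_content=True,   # per-range MD5 check, see docstring above
            max_concurrency=2,
        )
    await file_client.close()

asyncio.run(upload_local_file())
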
This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 16 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return await self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id["copy_id"] - except TypeError: - pass - try: - await self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def download_file( - self, - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Iterable[bytes] - """Downloads a file to a stream with automatic chunking. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. 
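
# Usage sketch (illustrative; not part of the deleted sources): starting a
# server-side copy and aborting it if it is still pending. The source URL is a
# placeholder, and FileProperties.copy.status is assumed to carry the copy state
# referred to by the start_copy_from_url docstring above.
import asyncio
from azure.storage.fileshare.aio import ShareFileClient

async def copy_then_maybe_abort():
    destination = ShareFileClient(
        "https://myaccount.file.core.windows.net",
        share_name="myshare", file_path="mydir/copy-of-source.txt",
        credential="<sas-token>")
    await destination.start_copy_from_url(
        "https://otheraccount.file.core.windows.net/myshare/mydir/source.txt?<sas>")
    props = await destination.get_file_properties()
    if props.copy.status == "pending":            # assumed attribute, see note above
        await destination.abort_copy(props)       # accepts an ID or FileProperties
    await destination.close()

asyncio.run(copy_then_maybe_abort())
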
versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A iterable data generator (stream) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 16 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - downloader = StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs - ) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 16 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
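
# Usage sketch (illustrative; not part of the deleted sources): downloading a
# byte range and then deleting the file. readall() on the returned downloader is
# an assumption; some releases expose content_as_bytes() instead.
import asyncio
from azure.storage.fileshare.aio import ShareFileClient

async def download_then_delete():
    file_client = ShareFileClient(
        "https://myaccount.file.core.windows.net",
        share_name="myshare", file_path="mydir/report.txt",
        credential="<sas-token>")
    downloader = await file_client.download_file(offset=0, length=512)
    data = await downloader.readall()      # assumed API, see note above
    print(len(data), "bytes downloaded")
    await file_client.delete_file()
    await file_client.close()

asyncio.run(download_then_delete())
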
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = await self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = "/".join(self.file_path) - return file_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop("size", None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.file.set_metadata( # type: ignore - metadata=metadata, lease_access_conditions=access_conditions, - timeout=timeout, cls=return_response_headers, headers=headers, **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range( # type: ignore - self, - data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. 
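
# Usage sketch (illustrative; not part of the deleted sources): updating HTTP
# headers and replacing the file's metadata in place, as described above.
import asyncio
from azure.storage.fileshare import ContentSettings
from azure.storage.fileshare.aio import ShareFileClient

async def update_headers_and_metadata():
    file_client = ShareFileClient(
        "https://myaccount.file.core.windows.net",
        share_name="myshare", file_path="mydir/report.txt",
        credential="<sas-token>")
    await file_client.set_http_headers(
        ContentSettings(content_type="application/json", cache_control="no-cache"))
    # Each call replaces all existing metadata; calling with no dict clears it.
    await file_client.set_file_metadata({"reviewed": "true"})
    await file_client.close()

asyncio.run(update_headers_and_metadata())
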
This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return await self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.file.upload_range_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_ranges( # type: ignore - self, - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid ranges of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A list of valid ranges. - :rtype: List[dict[str, int]] - """ - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - content_range = None - if offset is not None: - if length is not None: - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = "bytes={0}-{1}".format(offset, end_range) - else: - content_range = "bytes={0}-".format(offset) - try: - ranges = await self._client.file.get_range_list( - range=content_range, - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - return [{"start": b.start, "end": b.end} for b in ranges] - - @distributed_trace_async - async def clear_range( # type: ignore - self, - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = "bytes={0}-{1}".format(offset, end_range) - try: - return await self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> AsyncItemPaged - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop("results_per_page", None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. 
Use 'close_all_handles' instead.") - try: - response = await self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_lease_async.py deleted file mode 100644 index 0a04484..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_lease_async.py +++ /dev/null @@ -1,166 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._generated.models import ( - StorageErrorException) -from .._lease import ShareLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - - -class ShareLeaseClient(LeaseClientBase): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. 
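
# Usage sketch (illustrative; not part of the deleted sources): enumerating open
# handles on a file and force-closing all of them, using the counters returned
# by close_all_handles() as documented above.
import asyncio
from azure.storage.fileshare.aio import ShareFileClient

async def inspect_and_close_handles():
    file_client = ShareFileClient(
        "https://myaccount.file.core.windows.net",
        share_name="myshare", file_path="mydir/report.txt",
        credential="<sas-token>")
    async for handle in file_client.list_handles():
        print("open handle:", handle.id)
    summary = await file_client.close_all_handles()
    print(summary["closed_handles_count"], "closed,",
          summary["failed_handles_count"], "failed")
    await file_client.close()

asyncio.run(inspect_and_close_handles())
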
- - :param client: - The client of the file to lease. - :type client: ~azure.storage.fileshare.aio.ShareFileClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, **kwargs): - # type: (int, Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file for write and delete operations. If the file does not have an active lease, - the File service creates a lease on the file. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file does not have an active lease, the File service creates a - lease on the file and returns a new lease ID. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=-1, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the file. Releasing the lease allows another client to immediately acquire the lease - for the file as soon as the release is complete. - - - The lease may be released if the client lease id specified matches - that associated with the file. Releasing the lease allows another client - to immediately acquire the lease for the file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File service returns 400 - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - try: - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, **kwargs): - # type: (Optional[int], Any) -> int - """Force breaks the lease if the file has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - try: - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_models.py deleted file mode 100644 index affee8f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_models.py +++ /dev/null @@ -1,178 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator - -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error -from .._generated.models import StorageErrorException -from .._generated.models import DirectoryItem -from .._models import Handle, ShareProperties - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class SharePropertiesPaged(AsyncPageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. 
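
# Usage sketch (illustrative; not part of the deleted sources): a typical lease
# lifecycle with the ShareLeaseClient shown above: acquire on a file, pass the
# lease to a write, change its ID, then release. The lease object can also be
# used as an async context manager, which releases it on exit.
import asyncio
import uuid
from azure.storage.fileshare.aio import ShareFileClient

async def lease_lifecycle():
    file_client = ShareFileClient(
        "https://myaccount.file.core.windows.net",
        share_name="myshare", file_path="mydir/report.txt",
        credential="<sas-token>")
    lease = await file_client.acquire_lease()
    await file_client.upload_range(b"y" * 512, offset=0, length=512, lease=lease)
    await lease.change(str(uuid.uuid4()))     # swap to a new proposed lease ID
    await lease.release()
    await file_client.close()

asyncio.run(lease_lifecycle())
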
- :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class HandlesPaged(AsyncPageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryPropertiesPaged(AsyncPageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). 
- Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] - self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) - return self._response.next_marker or None, self.current_page diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_client_async.py deleted file mode 100644 index b6fb243..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_client_async.py +++ /dev/null @@ -1,563 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.pipeline import AsyncPipeline -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import ( - StorageErrorException, - SignedIdentifier, - DeleteSnapshotsOptionType) -from .._deserialize import deserialize_share_properties, deserialize_permission -from .._serialize import get_api_version -from .._share_client import ShareClient as ShareClientBase -from ._directory_client_async import ShareDirectoryClient -from ._file_client_async import ShareFileClient - -if TYPE_CHECKING: - from .._models import ShareProperties, AccessPolicy - - -class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareClient, self).__init__( - account_url, - share_name=share_name, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = loop - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - @distributed_trace_async - async def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 12 - :caption: Creates a file share. 
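
# Usage sketch (illustrative; not part of the deleted sources): building a
# ShareClient and deriving directory and file clients from it; the derived
# clients reuse the parent client's pipeline, as shown in the code above.
import asyncio
from azure.storage.fileshare.aio import ShareClient

async def derive_clients():
    share = ShareClient(
        "https://myaccount.file.core.windows.net",
        share_name="myshare", credential="<sas-token>")
    directory = share.get_directory_client("mydir")
    file_client = share.get_file_client("mydir/report.txt")
    print(directory.url)
    print(file_client.url)
    await share.close()

asyncio.run(derive_clients())
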
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return await self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 16 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 16 - :caption: Deletes the share and any snapshots. - """ - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - await self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - delete_snapshots=delete_include, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_hello_world_async.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 16 - :caption: Gets the share properties. - """ - timeout = kwargs.pop('timeout', None) - try: - props = await self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace_async - async def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 16 - :caption: Sets the share quota. - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.set_quota( # type: ignore - timeout=timeout, - quota=quota, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 16 - :caption: Sets the share metadata. - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Access policy information in a dict. 
- :rtype: dict[str, Any] - """ - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - - try: - return await self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: The approximate size of the data (in bytes) stored on the share. - :rtype: int - """ - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.share.get_statistics( - timeout=timeout, - **kwargs) - return stats.share_usage_bytes # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( # type: ignore - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
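
# Illustrative sketch: attaching a stored access policy with set_share_access_policy
# (defined above). 'share' is assumed to be a ShareClient like the one in the earlier
# sketch; AccessPolicy and ShareSasPermissions are assumed to be re-exported by this
# package as in the upstream SDK. At most five identifiers may be set per share.
from datetime import datetime, timedelta

from azure.multiapi.storagev2.fileshare.v2019_12_12 import AccessPolicy, ShareSasPermissions


async def grant_read_for_one_hour(share):
    policy = AccessPolicy(
        permission=ShareSasPermissions(read=True),
        start=datetime.utcnow(),
        expiry=datetime.utcnow() + timedelta(hours=1))
    await share.set_share_access_policy(signed_identifiers={"read-only": policy})
    current = await share.get_share_access_policy()
    print(current["signed_identifiers"])
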
- :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 16 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @distributed_trace_async - async def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return await self._client.share.create_permission(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - await directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace_async - async def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - directory = self.get_directory_client(directory_name) - await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_service_client_async.py deleted file mode 100644 index 2ee8390..0000000 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/aio/_share_service_client_async.py +++ /dev/null @@ -1,362 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import process_storage_error -from .._shared.policies_async import ExponentialRetry -from .._generated.aio import AzureFileStorage -from .._generated.models import StorageErrorException, StorageServiceProperties -from .._generated.version import VERSION -from .._share_service_client import ShareServiceClient as ShareServiceClientBase -from .._serialize import get_api_version -from ._share_client_async import ShareClient -from ._models import SharePropertiesPaged -from .._models import service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import ResourceTypes, AccountSasPermissions - from .._models import ( - ShareProperties, - Metrics, - CorsRule, - ) - - -class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. - For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_authentication_async.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareServiceClient, self).__init__( - account_url, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 12 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. 
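
# Illustrative sketch: configuring hourly metrics with set_service_properties as
# documented above. 'service' is assumed to be a ShareServiceClient instance, and
# Metrics and RetentionPolicy are assumed to be re-exported by this package as in
# the upstream SDK. Settings passed as None are preserved on the service.
from azure.multiapi.storagev2.fileshare.v2019_12_12 import Metrics, RetentionPolicy


async def enable_hourly_metrics(service):
    hourly = Metrics(
        enabled=True,
        include_apis=True,
        retention_policy=RetentionPolicy(enabled=True, days=7))
    await service.set_service_properties(hour_metrics=hourly)
    print(await service.get_service_properties())
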
- """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors - ) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs # type: Any - ): # type: (...) -> AsyncItemPaged - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 16 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace_async - async def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 12 - :caption: Create a share in the file share service. 
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace_async - async def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 16 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace_async - async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - """ - share = self.get_share_client(deleted_share_name) - try: - await share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except StorageErrorException as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. 
- """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/__init__.py deleted file mode 100644 index 7c838c1..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._version import VERSION -from ._file_client import ShareFileClient -from ._directory_client import ShareDirectoryClient -from ._share_client import ShareClient -from ._share_service_client import ShareServiceClient -from ._lease import ShareLeaseClient -from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import ( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode) -from ._models import ( - ShareProperties, - DirectoryProperties, - Handle, - FileProperties, - Metrics, - RetentionPolicy, - CorsRule, - ShareSmbSettings, - SmbMultichannel, - ShareProtocolSettings, - AccessPolicy, - FileSasPermissions, - ShareSasPermissions, - ContentSettings, - NTFSAttributes) -from ._generated.models import ( - HandleItem -) - -__version__ = VERSION - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageErrorCode', - 'Metrics', - 'RetentionPolicy', - 'CorsRule', - 'ShareSmbSettings', - 'SmbMultichannel', - 'ShareProtocolSettings', - 'AccessPolicy', - 'FileSasPermissions', - 'ShareSasPermissions', - 'ShareProperties', - 'DirectoryProperties', - 'FileProperties', - 'ContentSettings', - 'Handle', - 'NTFSAttributes', - 'HandleItem', - 'generate_account_sas', - 'generate_share_sas', - 'generate_file_sas' -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_deserialize.py deleted file mode 100644 index a4b3500..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_deserialize.py +++ /dev/null @@ -1,83 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import ShareProperties, DirectoryProperties, FileProperties -from ._shared.response_handlers import deserialize_metadata -from ._generated.models import ShareFileRangeList - - -def deserialize_share_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - share_properties = ShareProperties( - metadata=metadata, - **headers - ) - return share_properties - - -def deserialize_directory_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - directory_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return directory_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_file_stream(response, obj, headers): - file_properties = deserialize_file_properties(response, obj, headers) - obj.properties = file_properties - return response.location_mode, obj - - -def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission - ''' - - return obj.permission - - -def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission key - ''' - - if response is None or headers is None: - return None - return headers.get('x-ms-file-permission-key', None) - - -def get_file_ranges_result(ranges): - # type: (ShareFileRangeList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - file_ranges = [] # type: ignore - clear_ranges = [] # type: List - if ranges.ranges: - file_ranges = [ - {'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] # type: ignore - if ranges.clear_ranges: - clear_ranges = [ - {'start': clear_range.start, 'end': clear_range.end} for clear_range in ranges.clear_ranges] - return file_ranges, clear_ranges diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_directory_client.py deleted file mode 100644 index f1c7c05..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_directory_client.py +++ /dev/null @@ -1,706 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import StorageErrorException -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._deserialize import deserialize_directory_properties -from ._serialize import get_api_version -from ._file_client import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, DirectoryProperties, ContentSettings - from ._generated.models import HandleItem - - -class ShareDirectoryClient(StorageAccountHostsMixin): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.directory_path = directory_path - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - @classmethod - def from_directory_url(cls, directory_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> ShareDirectoryClient - """Create a ShareDirectoryClient from a directory url. - - :param str directory_url: - The full URI to the directory. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - try: - if not directory_url.lower().startswith('http'): - directory_url = "https://" + directory_url - except AttributeError: - raise ValueError("Directory URL must be a string.") - parsed_url = urlparse(directory_url.rstrip('/')) - if not parsed_url.path and not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(directory_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - path_snapshot, _ = parse_query(parsed_url.query) - - share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') - share_name = unquote(share_name) - - directory_path = path_dir - snapshot = snapshot or path_snapshot - - return cls( - account_url=account_url, share_name=share_name, directory_path=directory_path, - credential=credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. 
- """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - directory_path = "" - if self.directory_path: - directory_path = "/" + quote(self.directory_path, safe='~') - return "{}://{}/{}{}{}".format( - self.scheme, - hostname, - quote(share_name), - directory_path, - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - directory_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareDirectoryClient - """Create ShareDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str directory_path: - The directory path. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs) - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, napshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 12 - :caption: Gets the subdirectory client. 
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode, **kwargs) - - @distributed_trace - def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 12 - :caption: Creates a directory. - """ - timeout = kwargs.pop('timeout', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 12 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - self._client.directory.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], **Any) -> ItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 12 - :caption: List directories and files. 
- """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> ItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace - def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace - def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. 
- :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_subdirectory( - self, directory_name, # type: str - **kwargs): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 12 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace - def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 12 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace - def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. 
- :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 12 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace - def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 12 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_download.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_download.py deleted file mode 100644 index 8a86027..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_download.py +++ /dev/null @@ -1,522 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - 
chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], download_range[1], check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader): - self.size = size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: - chunk = next(self._iter_chunks) - self._current_content = self._iter_downloader.yield_chunk(chunk) - - return self._current_content - - next = __next__ # Python 2 compatibility. - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar: str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. - :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. 
- """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size == self.size: - self._download_complete = True - return response - - def chunks(self): - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. 
- :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency) - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_file_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_file_client.py deleted file mode 100644 index 631adac..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_file_client.py +++ /dev/null @@ -1,1395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, too-many-public-methods -import functools -import time -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Optional, Union, IO, List, Dict, Any, Iterable, Tuple, - TYPE_CHECKING -) - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import StorageErrorException, FileHTTPHeaders -from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, get_length -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._lease import ShareLeaseClient -from ._serialize import get_source_conditions, get_access_conditions, get_smb_properties, get_api_version -from ._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result -from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import -from ._download import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, ContentSettings, FileProperties, Handle - from ._generated.models import HandleItem - - -def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = client.create_file( - size, - content_settings=content_settings, - metadata=metadata, - timeout=timeout, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - **kwargs - ) - if size == 0: - return response - - responses = upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except StorageErrorException as error: - process_storage_error(error) - - -class ShareFileClient(StorageAccountHostsMixin): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. 
- :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not (share_name and file_path): - raise ValueError("Please specify a share name and file name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.file_path = file_path.split('/') - self.file_name = self.file_path[-1] - self.directory_path = "/".join(self.file_path[:-1]) - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - @classmethod - def from_file_url( - cls, file_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """A client to interact with a specific file, although that file may not yet exist. - - :param str file_url: The full URI to the file. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A File client. 
- :rtype: ~azure.storage.fileshare.ShareFileClient - """ - try: - if not file_url.lower().startswith('http'): - file_url = "https://" + file_url - except AttributeError: - raise ValueError("File URL must be a string.") - parsed_url = urlparse(file_url.rstrip('/')) - - if not (parsed_url.netloc and parsed_url.path): - raise ValueError("Invalid URL: {}".format(file_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - - path_share, _, path_file = parsed_url.path.lstrip('/').partition('/') - path_snapshot, _ = parse_query(parsed_url.query) - snapshot = snapshot or path_snapshot - share_name = unquote(path_share) - file_path = '/'.join([unquote(p) for p in path_file.split('/')]) - return cls(account_url, share_name, file_path, snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - "/".join([quote(p, safe='~') for p in self.file_path]), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Create ShareFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str file_path: - The file path. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START create_file_client] - :end-before: [END create_file_client] - :language: python - :dedent: 12 - :caption: Creates the file client with connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs) - - @distributed_trace - def acquire_lease(self, lease_id=None, **kwargs): - # type: (Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the file does not have an active lease, the File - Service creates a lease on the blob and returns a new lease. - - :param str lease_id: - Proposed lease ID, in a GUID string format. The File Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client.py - :start-after: [START acquire_and_release_lease_on_file] - :end-before: [END acquire_and_release_lease_on_file] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a file. - """ - kwargs['lease_duration'] = -1 - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_file( # type: ignore - self, size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 12 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. 
- :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 12 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs) - - @distributed_trace - def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. 
- Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 12 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - try: - self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def download_file( - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Iterable[bytes] - """Downloads a file to a stream with automatic chunking. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A iterable data generator (stream) - - .. 
admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 12 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - return StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs) - - @distributed_trace - def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 12 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = '/'.join(self.file_path) - return file_props # type: ignore - - @distributed_trace - def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop('size', None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_file_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.file.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - metadata=metadata, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def upload_range( # type: ignore - self, data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. 
Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @staticmethod - def _upload_range_from_url_options(source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - - if offset is None: - raise ValueError("offset must be provided.") - if length is None: - raise ValueError("length must be provided.") - if source_offset is None: - raise ValueError("source_offset must be provided.") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) - - source_mod_conditions = get_source_conditions(kwargs) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'copy_source': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_modified_access_conditions': source_mod_conditions, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.file.upload_range_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - def _get_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_sharesnapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - content_range = None - if offset is not None: - if length is not None: - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - else: - content_range = 'bytes={0}-'.format(offset) - options = { - 'sharesnapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range} - if previous_sharesnapshot: - try: - options['prevsharesnapshot'] = previous_sharesnapshot.snapshot # type: ignore - except AttributeError: - try: - options['prevsharesnapshot'] = previous_sharesnapshot['snapshot'] # type: ignore - except TypeError: - options['prevsharesnapshot'] = previous_sharesnapshot - options.update(kwargs) - return options - - @distributed_trace - def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> List[Dict[str, int]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A list of valid ranges. - :rtype: List[dict[str, int]] - """ - options = self._get_ranges_options( - offset=offset, - length=length, - **kwargs) - try: - ranges = self._client.file.get_range_list(**options) - except StorageErrorException as error: - process_storage_error(error) - return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] - - def get_ranges_diff( # type: ignore - self, - previous_sharesnapshot, # type: Union[str, Dict[str, Any]] - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - .. versionadded:: 12.6.0 - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :param str previous_sharesnapshot: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous file snapshot to be compared - against a more recent snapshot or the current file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. - The first element are filled file ranges, the 2nd element is cleared file ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_ranges_options( - offset=offset, - length=length, - previous_sharesnapshot=previous_sharesnapshot, - **kwargs) - try: - ranges = self._client.file.get_range_list(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_file_ranges_result(ranges) - - @distributed_trace - def clear_range( # type: ignore - self, offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - try: - return self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> ItemPaged[Handle] - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. 
Use 'close_all_handles' instead.") - try: - response = self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/__init__.py deleted file mode 100644 index 22b5762..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] - -from .version import VERSION - -__version__ = VERSION - diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_azure_file_storage.py deleted file mode 100644 index 3c52986..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_azure_file_storage.py +++ /dev/null @@ -1,71 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core import PipelineClient -from msrest import Serializer, Deserializer - -from ._configuration import AzureFileStorageConfiguration -from azure.core.exceptions import map_error -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from . import models - - -class AzureFileStorage(object): - """AzureFileStorage - - - :ivar service: Service operations - :vartype service: azure.storage.fileshare.operations.ServiceOperations - :ivar share: Share operations - :vartype share: azure.storage.fileshare.operations.ShareOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.fileshare.operations.DirectoryOperations - :ivar file: File operations - :vartype file: azure.storage.fileshare.operations.FileOperations - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - """ - - def __init__(self, version, url, **kwargs): - - base_url = '{url}' - self._config = AzureFileStorageConfiguration(version, url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2020-02-10' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - self._client.close() - def __enter__(self): - self._client.__enter__() - return self - def __exit__(self, *exc_details): - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_configuration.py deleted file mode 100644 index d638b1e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/_configuration.py +++ /dev/null @@ -1,58 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from .version import VERSION - - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage - Note that all parameters used to create this instance are saved as instance - attributes. - - :param version: Specifies the version of the operation to use for this - request. 
- :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - :ivar file_range_write_from_url: Only update is supported: - Update: - Writes the bytes downloaded from the source url into the specified range. - :type file_range_write_from_url: str - """ - - def __init__(self, version, url, **kwargs): - - if version is None: - raise ValueError("Parameter 'version' must not be None.") - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) - self.generate_client_request_id = True - - self.version = version - self.url = url - self.file_range_write_from_url = "update" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/__init__.py deleted file mode 100644 index 942d3c5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._azure_file_storage_async import AzureFileStorage -__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_azure_file_storage_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_azure_file_storage_async.py deleted file mode 100644 index c0fcb43..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_azure_file_storage_async.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core import AsyncPipelineClient -from msrest import Serializer, Deserializer - -from ._configuration_async import AzureFileStorageConfiguration -from azure.core.exceptions import map_error -from .operations_async import ServiceOperations -from .operations_async import ShareOperations -from .operations_async import DirectoryOperations -from .operations_async import FileOperations -from .. import models - - -class AzureFileStorage(object): - """AzureFileStorage - - - :ivar service: Service operations - :vartype service: azure.storage.fileshare.aio.operations_async.ServiceOperations - :ivar share: Share operations - :vartype share: azure.storage.fileshare.aio.operations_async.ShareOperations - :ivar directory: Directory operations - :vartype directory: azure.storage.fileshare.aio.operations_async.DirectoryOperations - :ivar file: File operations - :vartype file: azure.storage.fileshare.aio.operations_async.FileOperations - - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - """ - - def __init__( - self, version, url, **kwargs): - - base_url = '{url}' - self._config = AzureFileStorageConfiguration(version, url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2020-02-10' - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self): - await self._client.close() - async def __aenter__(self): - await self._client.__aenter__() - return self - async def __aexit__(self, *exc_details): - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_configuration_async.py deleted file mode 100644 index 75c206e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/_configuration_async.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -from ..version import VERSION - - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage - Note that all parameters used to create this instance are saved as instance - attributes. 
- - :param version: Specifies the version of the operation to use for this - request. - :type version: str - :param url: The URL of the service account, share, directory or file that - is the target of the desired operation. - :type url: str - :ivar file_range_write_from_url: Only update is supported: - Update: - Writes the bytes downloaded from the source url into the specified range. - :type file_range_write_from_url: str - """ - - def __init__(self, version, url, **kwargs): - - if version is None: - raise ValueError("Parameter 'version' must not be None.") - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azurefilestorage/{}'.format(VERSION)) - self.generate_client_request_id = True - self.accept_language = None - - self.version = version - self.url = url - self.file_range_write_from_url = "update" - - def _configure(self, **kwargs): - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/__init__.py deleted file mode 100644 index 601c709..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations_async import ServiceOperations -from ._share_operations_async import ShareOperations -from ._directory_operations_async import DirectoryOperations -from ._file_operations_async import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py deleted file mode 100644 index 26a962f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_directory_operations_async.py +++ /dev/null @@ -1,672 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "directory". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "directory" - - async def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): - """Creates a new directory under the specified share or parent directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata 
= {'url': '/{shareName}/{directory}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, *, cls=None, **kwargs): - """Returns all system properties for the specified directory, and can also - be used to check the existence of a directory. The data returned does - not include the files in the directory or any subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return 
cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}'} - - async def delete(self, timeout=None, *, cls=None, **kwargs): - """Removes the specified empty directory. Note that the directory must be - empty before it can be deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}'} - - async def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, *, cls=None, **kwargs): - """Sets properties on the directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. 
- :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{shareName}/{directory}'} - - async def set_metadata(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}'} - - async def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, *, cls=None, **kwargs): - """Returns a list of files or directories under the specified share or - directory. It lists the contents only for a single level of the - directory hierarchy. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListFilesAndDirectoriesSegmentResponse or the result of - cls(response) - :rtype: - ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return 
deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} - - async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs): - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', 
response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} - - async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, *, cls=None, **kwargs): - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterisk (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, 
self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_file_operations_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_file_operations_async.py deleted file mode 100644 index 51d7614..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_file_operations_async.py +++ /dev/null @@ -1,1671 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class FileOperations: - """FileOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file". - :ivar x_ms_copy_action: . Constant value: "abort". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_type = "file" - self.x_ms_copy_action = "abort" - - async def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Creates a new file or replaces a file. Note it only initializes the - file with no content. - - :param file_content_length: Specifies the maximum size for the file, - up to 4 TB. - :type file_content_length: long - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. 
- Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = 
self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Reads or downloads a 
file from the system, including its metadata and - properties. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and - specified together with the Range header, the service returns the MD5 - hash for the range, as long as the range is less than or equal to 4 MB - in size. - :type range_get_content_md5: bool - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - await response.load_body() - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', 
response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Returns all user-defined metadata, standard HTTP properties, and system - properties for the file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': 
self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def delete(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Sets HTTP headers on the file. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If - the specified byte value is less than the current size of the file, - then all ranges above the specified byte value are cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = 
self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def release_lease(self, lease_id, timeout=None, request_id=None, *, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, *, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the - start and end of the range must be specified. For an update operation, - the range can be up to 4 MB in size. For a clear operation, the range - can be up to the value of the file's full size. The File service - accepts only a single byte range for the Range and 'x-ms-range' - headers, and the byte range must be specified in the following format: - bytes=startByte-endByte. 
- :type range: str - :param file_range_write: Specify one of the following options: - - Update: Writes the bytes specified by the request body into the - specified range. The Range and Content-Length headers must match to - perform the update. - Clear: Clears the specified range and releases - the space used in storage for that range. To clear a range, set the - Content-Length header to zero, and set the Range header to a value - that indicates the range to clear, up to maximum file size. Possible - values include: 'update', 'clear' - :type file_range_write: str or - ~azure.storage.fileshare.models.FileRangeWriteType - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param optionalbody: Initial data. - :type optionalbody: Generator - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param content_md5: An MD5 hash of the content. This hash is used to - verify the integrity of the data during transport. When the - Content-MD5 header is specified, the File service compares the hash of - the content that has arrived with the header value that was sent. If - the two hashes do not match, the operation will fail with error code - 400 (Bad Request). - :type content_md5: bytearray - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "range" - - # Construct URL - url = self.upload_range.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Upload a range of bytes to a file where the contents are read from a - URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the - range of bytes that must be read from the copy source. 
- :type source_content_crc64: bytearray - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - source_if_none_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "range" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') - if source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 
'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def get_range_list(self, sharesnapshot=None, prevsharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param prevsharesnapshot: The previous snapshot parameter is an opaque - DateTime value that, when present, specifies the previous snapshot. - :type prevsharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, - inclusively. - :type range: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: ShareFileRangeList or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareFileRangeList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "rangelist" - - # Construct URL - url = self.get_range_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if prevsharesnapshot is not None: - query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ShareFileRangeList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param copy_file_smb_info: Additional parameters for the operation - :type copy_file_smb_info: - ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_permission_copy_mode = None - if copy_file_smb_info is not None: - file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - ignore_read_only = None - if copy_file_smb_info is not None: - ignore_read_only = copy_file_smb_info.ignore_read_only - file_attributes = None - if copy_file_smb_info is not None: - file_attributes = copy_file_smb_info.file_attributes - file_creation_time = None - if copy_file_smb_info is not None: - file_creation_time = copy_file_smb_info.file_creation_time - file_last_write_time = None - if copy_file_smb_info is not None: - file_last_write_time = copy_file_smb_info.file_last_write_time - set_archive_attribute = None - if copy_file_smb_info is not None: - set_archive_attribute = copy_file_smb_info.set_archive_attribute - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType') - if ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') - if file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - if file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - if file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", 
set_archive_attribute, 'bool') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Aborts a pending Copy File operation, and leaves a destination file - with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, *, cls=None, **kwargs): - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - async def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, *, cls=None, **kwargs): - """Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterisk (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. 
- :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_share_operations_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_share_operations_async.py deleted file mode 100644 index 915f757..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_share_operations_async.py +++ /dev/null @@ -1,1315 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from ... import models - - -class ShareOperations: - """ShareOperations async operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "share". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer) -> None: - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "share" - - async def create(self, timeout=None, metadata=None, quota=None, *, cls=None, **kwargs): - """Creates a new share under the specified account. If the share with the - same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, 
response_headers) - create.metadata = {'url': '/{shareName}'} - - async def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Returns all user-defined metadata and system properties for the - specified share or share snapshot. The data returned does not include - the share's list of files. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), - 'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')), - 'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')), - 'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')), - 
'x-ms-share-next-allowed-quota-downgrade-time': self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}'} - - async def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Operation marks the specified share or share snapshot for deletion. The - share or share snapshot and any files contained within it are later - deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the - base share and all of its snapshots. Possible values include: - 'include' - :type delete_snapshots: str or - ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}'} - - async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, *, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{shareName}'} - - async def release_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, *, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{shareName}'} - - async def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, *, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{shareName}'} - - async def 
renew_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, *, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': 
'/{shareName}'} - - async def break_lease(self, timeout=None, break_period=None, request_id=None, sharesnapshot=None, lease_access_conditions=None, *, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{shareName}'} - - async def create_snapshot(self, timeout=None, metadata=None, *, cls=None, **kwargs): - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{shareName}'} - - async def create_permission(self, share_permission, timeout=None, *, cls=None, **kwargs): - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the - share level. - :type share_permission: - ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.create_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_permission.metadata = {'url': '/{shareName}'} - - async def get_permission(self, file_permission_key, timeout=None, *, cls=None, **kwargs): - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the - directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: SharePermission or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.get_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SharePermission', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} - - async def set_quota(self, timeout=None, quota=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Sets quota for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. 
- :type quota: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "properties" - - # Construct URL - url = self.set_quota.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_quota.metadata = {'url': '/{shareName}'} - - async def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}'} - - async def get_access_policy(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Returns information about stored access policies specified on the - share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} - - async def set_access_policy(self, share_acl=None, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Sets a stored access policy for use with shared access signatures. - - :param share_acl: The ACL for the share. - :type share_acl: - list[~azure.storage.fileshare.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{shareName}'} - - async def get_statistics(self, timeout=None, lease_access_conditions=None, *, cls=None, **kwargs): - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: ShareStats or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ShareStats', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} - - async def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, *, cls=None, **kwargs): - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param deleted_share_name: Specifies the name of the - preivously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the - preivously-deleted share. 
- :type deleted_share_version: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.restore.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - restore.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/__init__.py deleted file mode 100644 index 163c6b6..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/__init__.py +++ /dev/null @@ -1,123 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import ClearRange - from ._models_py3 import CopyFileSmbInfo - from ._models_py3 import CorsRule - from ._models_py3 import DirectoryItem - from ._models_py3 import FileHTTPHeaders - from ._models_py3 import FileItem - from ._models_py3 import FileProperty - from ._models_py3 import FileRange - from ._models_py3 import FilesAndDirectoriesListSegment - from ._models_py3 import HandleItem - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListFilesAndDirectoriesSegmentResponse - from ._models_py3 import ListHandlesResponse - from ._models_py3 import ListSharesResponse - from ._models_py3 import Metrics - from ._models_py3 import RetentionPolicy - from ._models_py3 import ShareFileRangeList - from ._models_py3 import ShareItem - from ._models_py3 import SharePermission - from ._models_py3 import ShareProperties - from ._models_py3 import ShareProtocolSettings - from ._models_py3 import ShareSmbSettings - from ._models_py3 import ShareStats - from ._models_py3 import SignedIdentifier - from ._models_py3 import SmbMultichannel - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError, StorageErrorException - from ._models_py3 import StorageServiceProperties -except (SyntaxError, ImportError): - from ._models import AccessPolicy - from ._models import ClearRange - from ._models import CopyFileSmbInfo - from ._models import CorsRule - from ._models import DirectoryItem - from ._models import FileHTTPHeaders - from ._models import FileItem - from ._models import FileProperty - from ._models import FileRange - from ._models import FilesAndDirectoriesListSegment - from ._models import HandleItem - from ._models import LeaseAccessConditions - from ._models import ListFilesAndDirectoriesSegmentResponse - from ._models import ListHandlesResponse - from ._models import ListSharesResponse - from ._models import Metrics - from ._models import RetentionPolicy - from ._models import ShareFileRangeList - from ._models import ShareItem - from ._models import SharePermission - from ._models import ShareProperties - from ._models import ShareProtocolSettings - from ._models import ShareSmbSettings - from ._models import ShareStats - from ._models import SignedIdentifier - from ._models import SmbMultichannel - from ._models import SourceModifiedAccessConditions - from ._models import StorageError, StorageErrorException - from ._models import StorageServiceProperties -from ._azure_file_storage_enums import ( - CopyStatusType, - DeleteSnapshotsOptionType, - FileRangeWriteType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListSharesIncludeType, - PermissionCopyModeType, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'ClearRange', - 'CopyFileSmbInfo', - 'CorsRule', - 'DirectoryItem', - 'FileHTTPHeaders', - 'FileItem', - 'FileProperty', - 'FileRange', - 'FilesAndDirectoriesListSegment', - 'HandleItem', - 'LeaseAccessConditions', - 'ListFilesAndDirectoriesSegmentResponse', - 'ListHandlesResponse', - 'ListSharesResponse', - 'Metrics', - 'RetentionPolicy', - 'ShareFileRangeList', - 'ShareItem', - 'SharePermission', - 'ShareProperties', - 'ShareProtocolSettings', - 'ShareSmbSettings', - 'ShareStats', - 'SignedIdentifier', - 'SmbMultichannel', - 'SourceModifiedAccessConditions', - 'StorageError', 'StorageErrorException', - 'StorageServiceProperties', - 'StorageErrorCode', - 
'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'PermissionCopyModeType', - 'DeleteSnapshotsOptionType', - 'ListSharesIncludeType', - 'CopyStatusType', - 'FileRangeWriteType', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_azure_file_storage_enums.py deleted file mode 100644 index 3a1fab6..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_azure_file_storage_enums.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum - - -class StorageErrorCode(str, Enum): - - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = 
"ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch" - authorization_protocol_mismatch = "AuthorizationProtocolMismatch" - authorization_permission_mismatch = "AuthorizationPermissionMismatch" - authorization_service_mismatch = "AuthorizationServiceMismatch" - authorization_resource_type_mismatch = "AuthorizationResourceTypeMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - - -class LeaseDurationType(str, Enum): - - infinite = "infinite" - fixed = "fixed" - - -class LeaseStateType(str, Enum): - - available = "available" - leased = "leased" - expired = "expired" - breaking = "breaking" - broken = "broken" - - -class LeaseStatusType(str, Enum): - - locked = "locked" - unlocked = "unlocked" - - -class PermissionCopyModeType(str, Enum): - - source = "source" - override = "override" - - -class DeleteSnapshotsOptionType(str, Enum): - - include = "include" - - -class ListSharesIncludeType(str, Enum): - - snapshots = "snapshots" - metadata = "metadata" - deleted = "deleted" - - -class CopyStatusType(str, Enum): - - pending = "pending" - success = "success" - aborted = "aborted" - failed = "failed" - - -class FileRangeWriteType(str, Enum): - - update = "update" - clear = "clear" diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models.py deleted file mode 100644 index 9f6b2db..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models.py +++ /dev/null @@ -1,1021 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class ClearRange(Model): - """ClearRange. 
- - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__(self, **kwargs): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class CopyFileSmbInfo(Model): - """Additional parameters for start_copy operation. - - :param file_permission_copy_mode: Specifies the option to copy file - security descriptor from source file or to set it using the value which is - defined by the header value of x-ms-file-permission or - x-ms-file-permission-key. Possible values include: 'source', 'override' - :type file_permission_copy_mode: str or - ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file - if it already exists and has read-only attribute set. - :type ignore_read_only: bool - :param file_attributes: Specifies either the option to copy file - attributes from a source file(source) to a target file or a list of - attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file - creation time from a source file(source) to a target file or a time value - in ISO 8601 format to set as creation time on a target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last - write time from a source file(source) to a target file or a time value in - ISO 8601 format to set as last write time on a target file. - :type file_last_write_time: str - :param set_archive_attribute: Specifies the option to set archive - attribute on a target file. True means archive attribute will be set on a - target file despite attribute overrides or a source file state. - :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}}, - 'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}}, - 'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}}, - 'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}}, - 'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}}, - 'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None) - self.ignore_read_only = kwargs.get('ignore_read_only', None) - self.file_attributes = kwargs.get('file_attributes', None) - self.file_creation_time = kwargs.get('file_creation_time', None) - self.file_last_write_time = kwargs.get('file_last_write_time', None) - self.set_archive_attribute = kwargs.get('set_archive_attribute', None) - - -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. 
Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin - domain may specify on the CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs.get('allowed_origins', None) - self.allowed_methods = kwargs.get('allowed_methods', None) - self.allowed_headers = kwargs.get('allowed_headers', None) - self.exposed_headers = kwargs.get('exposed_headers', None) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) - - -class DirectoryItem(Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__(self, **kwargs): - super(DirectoryItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - - -class FileHTTPHeaders(Model): - """Additional parameters for a set of operations, such as: File_create, - File_set_http_headers. - - :param file_content_type: Sets the MIME content type of the file. The - default type is 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been - applied to the file. 
- :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this - resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service - stores this value but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition - header. - :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, - 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, - 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, - 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, - 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, - 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = kwargs.get('file_content_type', None) - self.file_content_encoding = kwargs.get('file_content_encoding', None) - self.file_content_language = kwargs.get('file_content_language', None) - self.file_cache_control = kwargs.get('file_cache_control', None) - self.file_content_md5 = kwargs.get('file_content_md5', None) - self.file_content_disposition = kwargs.get('file_content_disposition', None) - - -class FileItem(Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.FileProperty - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, - } - _xml_map = { - 'name': 'File' - } - - def __init__(self, **kwargs): - super(FileItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.properties = kwargs.get('properties', None) - - -class FileProperty(Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value - may not be up-to-date since an SMB client may have modified the file - locally. The value of Content-Length may not reflect that fact until the - handle is closed or the op-lock is broken. To retrieve current property - values, call Get File Properties. - :type content_length: long - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(FileProperty, self).__init__(**kwargs) - self.content_length = kwargs.get('content_length', None) - - -class FileRange(Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__(self, **kwargs): - super(FileRange, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.end = kwargs.get('end', None) - - -class FilesAndDirectoriesListSegment(Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. - :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__(self, **kwargs): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = kwargs.get('directory_items', None) - self.file_items = kwargs.get('file_items', None) - - -class HandleItem(Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID - :type handle_id: str - :param path: Required. File or directory name including full path starting - from share root - :type path: str - :param file_id: Required. FileId uniquely identifies the file or - directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the - object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file - handle was opened - :type session_id: str - :param client_ip: Required. Client IP that opened the handle - :type client_ip: str - :param open_time: Required. Time when the session that previously opened - the handle has last been reconnected. 
(UTC) - :type open_time: datetime - :param last_reconnect_time: Time handle was last connected to (UTC) - :type last_reconnect_time: datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, - 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, - 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, - 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, - 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, - 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__(self, **kwargs): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = kwargs.get('handle_id', None) - self.path = kwargs.get('path', None) - self.file_id = kwargs.get('file_id', None) - self.parent_id = kwargs.get('parent_id', None) - self.session_id = kwargs.get('session_id', None) - self.client_ip = kwargs.get('client_ip', None) - self.open_time = kwargs.get('open_time', None) - self.last_reconnect_time = kwargs.get('last_reconnect_time', None) - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListFilesAndDirectoriesSegmentResponse(Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: - ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.share_name = kwargs.get('share_name', None) - self.share_snapshot = kwargs.get('share_snapshot', None) - self.directory_path = kwargs.get('directory_path', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs.get('segment', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListHandlesResponse(Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = kwargs.get('handle_list', None) - self.next_marker = kwargs.get('next_marker', None) - - -class ListSharesResponse(Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItem] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, **kwargs): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs.get('service_endpoint', None) - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.share_items = kwargs.get('share_items', None) - self.next_marker = kwargs.get('next_marker', None) - - -class Metrics(Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs.get('version', None) - self.enabled = kwargs.get('enabled', None) - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class RetentionPolicy(Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the File service. If false, metrics data is retained, and the user is - responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be - retained. All data older than this value will be deleted. Metrics data is - deleted on a best-effort basis after the retention period expires. 
- :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - self.days = kwargs.get('days', None) - - -class ShareFileRangeList(Model): - """The list of file ranges. - - :param ranges: - :type ranges: list[~azure.storage.fileshare.models.FileRange] - :param clear_ranges: - :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] - """ - - _attribute_map = { - 'ranges': {'key': 'Ranges', 'type': '[FileRange]', 'xml': {'name': 'Ranges', 'itemsName': 'Range'}}, - 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]', 'xml': {'name': 'ClearRanges', 'itemsName': 'ClearRange'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareFileRangeList, self).__init__(**kwargs) - self.ranges = kwargs.get('ranges', None) - self.clear_ranges = kwargs.get('clear_ranges', None) - - -class ShareItem(Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.ShareProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__(self, **kwargs): - super(ShareItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.snapshot = kwargs.get('snapshot', None) - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs.get('properties', None) - self.metadata = kwargs.get('metadata', None) - - -class SharePermission(Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor - Definition Language (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SharePermission, self).__init__(**kwargs) - self.permission = kwargs.get('permission', None) - - -class ShareProperties(Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param quota: Required. 
- :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_mbps: - :type provisioned_ingress_mbps: int - :param provisioned_egress_mbps: - :type provisioned_egress_mbps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: datetime - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or - ~azure.storage.fileshare.models.LeaseDurationType - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}}, - 'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}}, - 'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareProperties, self).__init__(**kwargs) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - self.quota = kwargs.get('quota', None) - self.provisioned_iops = kwargs.get('provisioned_iops', None) - self.provisioned_ingress_mbps = kwargs.get('provisioned_ingress_mbps', None) - self.provisioned_egress_mbps = kwargs.get('provisioned_egress_mbps', None) - self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - - -class ShareProtocolSettings(Model): - """Protocol settings. - - :param smb: Settings for SMB protocol. 
- :type smb: ~azure.storage.fileshare.models.ShareSmbSettings - """ - - _attribute_map = { - 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareProtocolSettings, self).__init__(**kwargs) - self.smb = kwargs.get('smb', None) - - -class ShareSmbSettings(Model): - """Settings for SMB protocol. - - :param multichannel: Settings for SMB Multichannel. - :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel - """ - - _attribute_map = { - 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel', 'xml': {'name': 'Multichannel'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareSmbSettings, self).__init__(**kwargs) - self.multichannel = kwargs.get('multichannel', None) - - -class ShareStats(Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data - stored in bytes. Note that this value may not include all recently created - or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = kwargs.get('share_usage_bytes', None) - - -class SignedIdentifier(Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - } - - def __init__(self, **kwargs): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs.get('id', None) - self.access_policy = kwargs.get('access_policy', None) - - -class SmbMultichannel(Model): - """Settings for SMB multichannel. - - :param enabled: If SMB multichannel is enabled. - :type enabled: bool - """ - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - } - _xml_map = { - 'name': 'Multichannel' - } - - def __init__(self, **kwargs): - super(SmbMultichannel, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for upload_range_from_url operation. - - :param source_if_match_crc64: Specify the crc64 value to operate only on - range with a matching crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only - on range without a matching crc64 checksum. 
-    :type source_if_none_match_crc64: bytearray
-    """
-
-    _attribute_map = {
-        'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
-        'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
-        self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None)
-        self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None)
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param message:
-    :type message: str
-    """
-
-    _attribute_map = {
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageError, self).__init__(**kwargs)
-        self.message = kwargs.get('message', None)
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responded with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageServiceProperties(Model):
-    """Storage service properties.
-
-    :param hour_metrics: A summary of request statistics grouped by API in
-     hourly aggregates for files.
-    :type hour_metrics: ~azure.storage.fileshare.models.Metrics
-    :param minute_metrics: A summary of request statistics grouped by API in
-     minute aggregates for files.
-    :type minute_metrics: ~azure.storage.fileshare.models.Metrics
-    :param cors: The set of CORS rules.
-    :type cors: list[~azure.storage.fileshare.models.CorsRule]
-    :param protocol: Protocol settings
-    :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
-    """
-
-    _attribute_map = {
-        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
-        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
-        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
-        'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, **kwargs):
-        super(StorageServiceProperties, self).__init__(**kwargs)
-        self.hour_metrics = kwargs.get('hour_metrics', None)
-        self.minute_metrics = kwargs.get('minute_metrics', None)
-        self.cors = kwargs.get('cors', None)
-        self.protocol = kwargs.get('protocol', None)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py
deleted file mode 100644
index 7fe9c4a..0000000
--- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/models/_models_py3.py
+++ /dev/null
@@ -1,1021 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model -from azure.core.exceptions import HttpResponseError - - -class AccessPolicy(Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, **kwargs) -> None: - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class ClearRange(Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class CopyFileSmbInfo(Model): - """Additional parameters for start_copy operation. - - :param file_permission_copy_mode: Specifies the option to copy file - security descriptor from source file or to set it using the value which is - defined by the header value of x-ms-file-permission or - x-ms-file-permission-key. Possible values include: 'source', 'override' - :type file_permission_copy_mode: str or - ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file - if it already exists and has read-only attribute set. - :type ignore_read_only: bool - :param file_attributes: Specifies either the option to copy file - attributes from a source file(source) to a target file or a list of - attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file - creation time from a source file(source) to a target file or a time value - in ISO 8601 format to set as creation time on a target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last - write time from a source file(source) to a target file or a time value in - ISO 8601 format to set as last write time on a target file. - :type file_last_write_time: str - :param set_archive_attribute: Specifies the option to set archive - attribute on a target file. True means archive attribute will be set on a - target file despite attribute overrides or a source file state. 
-    :type set_archive_attribute: bool
-    """
-
-    _attribute_map = {
-        'file_permission_copy_mode': {'key': '', 'type': 'PermissionCopyModeType', 'xml': {'name': 'file_permission_copy_mode'}},
-        'ignore_read_only': {'key': '', 'type': 'bool', 'xml': {'name': 'ignore_read_only'}},
-        'file_attributes': {'key': '', 'type': 'str', 'xml': {'name': 'file_attributes'}},
-        'file_creation_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_creation_time'}},
-        'file_last_write_time': {'key': '', 'type': 'str', 'xml': {'name': 'file_last_write_time'}},
-        'set_archive_attribute': {'key': '', 'type': 'bool', 'xml': {'name': 'set_archive_attribute'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, file_permission_copy_mode=None, ignore_read_only: bool=None, file_attributes: str=None, file_creation_time: str=None, file_last_write_time: str=None, set_archive_attribute: bool=None, **kwargs) -> None:
-        super(CopyFileSmbInfo, self).__init__(**kwargs)
-        self.file_permission_copy_mode = file_permission_copy_mode
-        self.ignore_read_only = ignore_read_only
-        self.file_attributes = file_attributes
-        self.file_creation_time = file_creation_time
-        self.file_last_write_time = file_last_write_time
-        self.set_archive_attribute = set_archive_attribute
-
-
-class CorsRule(Model):
-    """CORS is an HTTP feature that enables a web application running under one
-    domain to access resources in another domain. Web browsers implement a
-    security restriction known as same-origin policy that prevents a web page
-    from calling APIs in a different domain; CORS provides a secure way to
-    allow one domain (the origin domain) to call APIs in another domain.
-
-    All required parameters must be populated in order to send to Azure.
-
-    :param allowed_origins: Required. The origin domains that are permitted to
-     make a request against the storage service via CORS. The origin domain is
-     the domain from which the request originates. Note that the origin must be
-     an exact case-sensitive match with the origin that the user agent sends to
-     the service. You can also use the wildcard character '*' to allow all
-     origin domains to make requests via CORS.
-    :type allowed_origins: str
-    :param allowed_methods: Required. The methods (HTTP request verbs) that
-     the origin domain may use for a CORS request. (comma separated)
-    :type allowed_methods: str
-    :param allowed_headers: Required. The request headers that the origin
-     domain may specify on the CORS request.
-    :type allowed_headers: str
-    :param exposed_headers: Required. The response headers that may be sent in
-     the response to the CORS request and exposed by the browser to the request
-     issuer.
-    :type exposed_headers: str
-    :param max_age_in_seconds: Required. The maximum amount of time that a
-     browser should cache the preflight OPTIONS request.
- :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class DirectoryItem(Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__(self, *, name: str, **kwargs) -> None: - super(DirectoryItem, self).__init__(**kwargs) - self.name = name - - -class FileHTTPHeaders(Model): - """Additional parameters for a set of operations, such as: File_create, - File_set_http_headers. - - :param file_content_type: Sets the MIME content type of the file. The - default type is 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been - applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this - resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service - stores this value but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition - header. 
- :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_type'}}, - 'file_content_encoding': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_encoding'}}, - 'file_content_language': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_language'}}, - 'file_cache_control': {'key': '', 'type': 'str', 'xml': {'name': 'file_cache_control'}}, - 'file_content_md5': {'key': '', 'type': 'bytearray', 'xml': {'name': 'file_content_md5'}}, - 'file_content_disposition': {'key': '', 'type': 'str', 'xml': {'name': 'file_content_disposition'}}, - } - _xml_map = { - } - - def __init__(self, *, file_content_type: str=None, file_content_encoding: str=None, file_content_language: str=None, file_cache_control: str=None, file_content_md5: bytearray=None, file_content_disposition: str=None, **kwargs) -> None: - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = file_content_type - self.file_content_encoding = file_content_encoding - self.file_content_language = file_content_language - self.file_cache_control = file_cache_control - self.file_content_md5 = file_content_md5 - self.file_content_disposition = file_content_disposition - - -class FileItem(Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.FileProperty - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'properties': {'key': 'Properties', 'type': 'FileProperty', 'xml': {'name': 'Properties'}}, - } - _xml_map = { - 'name': 'File' - } - - def __init__(self, *, name: str, properties, **kwargs) -> None: - super(FileItem, self).__init__(**kwargs) - self.name = name - self.properties = properties - - -class FileProperty(Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value - may not be up-to-date since an SMB client may have modified the file - locally. The value of Content-Length may not reflect that fact until the - handle is closed or the op-lock is broken. To retrieve current property - values, call Get File Properties. - :type content_length: long - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long', 'xml': {'name': 'Content-Length'}}, - } - _xml_map = { - } - - def __init__(self, *, content_length: int, **kwargs) -> None: - super(FileProperty, self).__init__(**kwargs) - self.content_length = content_length - - -class FileRange(Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__(self, *, start: int, end: int, **kwargs) -> None: - super(FileRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class FilesAndDirectoriesListSegment(Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. - :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]', 'xml': {'name': 'DirectoryItems', 'itemsName': 'Directory'}}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]', 'xml': {'name': 'FileItems', 'itemsName': 'File'}}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__(self, *, directory_items, file_items, **kwargs) -> None: - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = directory_items - self.file_items = file_items - - -class HandleItem(Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID - :type handle_id: str - :param path: Required. File or directory name including full path starting - from share root - :type path: str - :param file_id: Required. FileId uniquely identifies the file or - directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the - object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file - handle was opened - :type session_id: str - :param client_ip: Required. Client IP that opened the handle - :type client_ip: str - :param open_time: Required. Time when the session that previously opened - the handle has last been reconnected. 
(UTC) - :type open_time: datetime - :param last_reconnect_time: Time handle was last connected to (UTC) - :type last_reconnect_time: datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str', 'xml': {'name': 'HandleId'}}, - 'path': {'key': 'Path', 'type': 'str', 'xml': {'name': 'Path'}}, - 'file_id': {'key': 'FileId', 'type': 'str', 'xml': {'name': 'FileId'}}, - 'parent_id': {'key': 'ParentId', 'type': 'str', 'xml': {'name': 'ParentId'}}, - 'session_id': {'key': 'SessionId', 'type': 'str', 'xml': {'name': 'SessionId'}}, - 'client_ip': {'key': 'ClientIp', 'type': 'str', 'xml': {'name': 'ClientIp'}}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123', 'xml': {'name': 'OpenTime'}}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123', 'xml': {'name': 'LastReconnectTime'}}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__(self, *, handle_id: str, path: str, file_id: str, session_id: str, client_ip: str, open_time, parent_id: str=None, last_reconnect_time=None, **kwargs) -> None: - super(HandleItem, self).__init__(**kwargs) - self.handle_id = handle_id - self.path = path - self.file_id = file_id - self.parent_id = parent_id - self.session_id = session_id - self.client_ip = client_ip - self.open_time = open_time - self.last_reconnect_time = last_reconnect_time - - -class LeaseAccessConditions(Model): - """Additional parameters for a set of operations. - - :param lease_id: If specified, the operation only succeeds if the - resource's lease is active and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': '', 'type': 'str', 'xml': {'name': 'lease_id'}}, - } - _xml_map = { - } - - def __init__(self, *, lease_id: str=None, **kwargs) -> None: - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListFilesAndDirectoriesSegmentResponse(Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. - :type segment: - ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'name': 'ShareName', 'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'name': 'ShareSnapshot', 'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'name': 'DirectoryPath', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment', 'xml': {'name': 'Segment'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, share_name: str, directory_path: str, prefix: str, segment, next_marker: str, share_snapshot: str=None, marker: str=None, max_results: int=None, **kwargs) -> None: - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.share_name = share_name - self.share_snapshot = share_snapshot - self.directory_path = directory_path - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListHandlesResponse(Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'itemsName': 'Entries', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, next_marker: str, handle_list=None, **kwargs) -> None: - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = handle_list - self.next_marker = next_marker - - -class ListSharesResponse(Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItem] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItem]', 'xml': {'name': 'Shares', 'itemsName': 'Shares', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__(self, *, service_endpoint: str, next_marker: str, prefix: str=None, marker: str=None, max_results: int=None, share_items=None, **kwargs) -> None: - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.share_items = share_items - self.next_marker = next_marker - - -class Metrics(Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :type include_apis: bool - :param retention_policy: - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, version: str, enabled: bool, include_apis: bool=None, retention_policy=None, **kwargs) -> None: - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class RetentionPolicy(Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled - for the File service. If false, metrics data is retained, and the user is - responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be - retained. All data older than this value will be deleted. Metrics data is - deleted on a best-effort basis after the retention period expires. 
- :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class ShareFileRangeList(Model): - """The list of file ranges. - - :param ranges: - :type ranges: list[~azure.storage.fileshare.models.FileRange] - :param clear_ranges: - :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] - """ - - _attribute_map = { - 'ranges': {'key': 'Ranges', 'type': '[FileRange]', 'xml': {'name': 'Ranges', 'itemsName': 'Range'}}, - 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]', 'xml': {'name': 'ClearRanges', 'itemsName': 'ClearRange'}}, - } - _xml_map = { - } - - def __init__(self, *, ranges=None, clear_ranges=None, **kwargs) -> None: - super(ShareFileRangeList, self).__init__(**kwargs) - self.ranges = ranges - self.clear_ranges = clear_ranges - - -class ShareItem(Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. - :type properties: ~azure.storage.fileshare.models.ShareProperties - :param metadata: - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, - 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'properties': {'key': 'Properties', 'type': 'ShareProperties', 'xml': {'name': 'Properties'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__(self, *, name: str, properties, snapshot: str=None, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None: - super(ShareItem, self).__init__(**kwargs) - self.name = name - self.snapshot = snapshot - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class SharePermission(Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor - Definition Language (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str', 'xml': {'name': 'permission'}}, - } - _xml_map = { - } - - def __init__(self, *, permission: str, **kwargs) -> None: - super(SharePermission, self).__init__(**kwargs) - self.permission = permission - - -class ShareProperties(Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: datetime - :param etag: Required. - :type etag: str - :param quota: Required. 
- :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_mbps: - :type provisioned_ingress_mbps: int - :param provisioned_egress_mbps: - :type provisioned_egress_mbps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: datetime - :param deleted_time: - :type deleted_time: datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param lease_status: Possible values include: 'locked', 'unlocked' - :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType - :param lease_state: Possible values include: 'available', 'leased', - 'expired', 'breaking', 'broken' - :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType - :param lease_duration: Possible values include: 'infinite', 'fixed' - :type lease_duration: str or - ~azure.storage.fileshare.models.LeaseDurationType - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123', 'xml': {'name': 'Last-Modified'}}, - 'etag': {'key': 'Etag', 'type': 'str', 'xml': {'name': 'Etag'}}, - 'quota': {'key': 'Quota', 'type': 'int', 'xml': {'name': 'Quota'}}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int', 'xml': {'name': 'ProvisionedIops'}}, - 'provisioned_ingress_mbps': {'key': 'ProvisionedIngressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedIngressMBps'}}, - 'provisioned_egress_mbps': {'key': 'ProvisionedEgressMBps', 'type': 'int', 'xml': {'name': 'ProvisionedEgressMBps'}}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123', 'xml': {'name': 'NextAllowedQuotaDowngradeTime'}}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'LeaseStatusType', 'xml': {'name': 'LeaseStatus'}}, - 'lease_state': {'key': 'LeaseState', 'type': 'LeaseStateType', 'xml': {'name': 'LeaseState'}}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'LeaseDurationType', 'xml': {'name': 'LeaseDuration'}}, - } - _xml_map = { - } - - def __init__(self, *, last_modified, etag: str, quota: int, provisioned_iops: int=None, provisioned_ingress_mbps: int=None, provisioned_egress_mbps: int=None, next_allowed_quota_downgrade_time=None, deleted_time=None, remaining_retention_days: int=None, lease_status=None, lease_state=None, lease_duration=None, **kwargs) -> None: - super(ShareProperties, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.quota = quota - self.provisioned_iops = provisioned_iops - self.provisioned_ingress_mbps = provisioned_ingress_mbps - self.provisioned_egress_mbps = provisioned_egress_mbps - self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - - -class ShareProtocolSettings(Model): - """Protocol settings. - - :param smb: Settings for SMB protocol. 
- :type smb: ~azure.storage.fileshare.models.ShareSmbSettings - """ - - _attribute_map = { - 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}}, - } - _xml_map = { - } - - def __init__(self, *, smb=None, **kwargs) -> None: - super(ShareProtocolSettings, self).__init__(**kwargs) - self.smb = smb - - -class ShareSmbSettings(Model): - """Settings for SMB protocol. - - :param multichannel: Settings for SMB Multichannel. - :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel - """ - - _attribute_map = { - 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel', 'xml': {'name': 'Multichannel'}}, - } - _xml_map = { - } - - def __init__(self, *, multichannel=None, **kwargs) -> None: - super(ShareSmbSettings, self).__init__(**kwargs) - self.multichannel = multichannel - - -class ShareStats(Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data - stored in bytes. Note that this value may not include all recently created - or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int', 'xml': {'name': 'ShareUsageBytes'}}, - } - _xml_map = { - } - - def __init__(self, *, share_usage_bytes: int, **kwargs) -> None: - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = share_usage_bytes - - -class SignedIdentifier(Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SmbMultichannel(Model): - """Settings for SMB multichannel. - - :param enabled: If SMB multichannel is enabled. - :type enabled: bool - """ - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - } - _xml_map = { - 'name': 'Multichannel' - } - - def __init__(self, *, enabled: bool=None, **kwargs) -> None: - super(SmbMultichannel, self).__init__(**kwargs) - self.enabled = enabled - - -class SourceModifiedAccessConditions(Model): - """Additional parameters for upload_range_from_url operation. - - :param source_if_match_crc64: Specify the crc64 value to operate only on - range with a matching crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only - on range without a matching crc64 checksum. 
-    :type source_if_none_match_crc64: bytearray
-    """
-
-    _attribute_map = {
-        'source_if_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_match_crc64'}},
-        'source_if_none_match_crc64': {'key': '', 'type': 'bytearray', 'xml': {'name': 'source_if_none_match_crc64'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, source_if_match_crc64: bytearray=None, source_if_none_match_crc64: bytearray=None, **kwargs) -> None:
-        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
-        self.source_if_match_crc64 = source_if_match_crc64
-        self.source_if_none_match_crc64 = source_if_none_match_crc64
-
-
-class StorageError(Model):
-    """StorageError.
-
-    :param message:
-    :type message: str
-    """
-
-    _attribute_map = {
-        'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, message: str=None, **kwargs) -> None:
-        super(StorageError, self).__init__(**kwargs)
-        self.message = message
-
-
-class StorageErrorException(HttpResponseError):
-    """Server responded with exception of type: 'StorageError'.
-
-    :param deserialize: A deserializer
-    :param response: Server response to be deserialized.
-    """
-
-    def __init__(self, response, deserialize, *args):
-
-        model_name = 'StorageError'
-        self.error = deserialize(model_name, response)
-        if self.error is None:
-            self.error = deserialize.dependencies[model_name]()
-        super(StorageErrorException, self).__init__(response=response)
-
-
-class StorageServiceProperties(Model):
-    """Storage service properties.
-
-    :param hour_metrics: A summary of request statistics grouped by API in
-     hourly aggregates for files.
-    :type hour_metrics: ~azure.storage.fileshare.models.Metrics
-    :param minute_metrics: A summary of request statistics grouped by API in
-     minute aggregates for files.
-    :type minute_metrics: ~azure.storage.fileshare.models.Metrics
-    :param cors: The set of CORS rules.
-    :type cors: list[~azure.storage.fileshare.models.CorsRule]
-    :param protocol: Protocol settings
-    :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings
-    """
-
-    _attribute_map = {
-        'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}},
-        'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}},
-        'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}},
-        'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}},
-    }
-    _xml_map = {
-    }
-
-    def __init__(self, *, hour_metrics=None, minute_metrics=None, cors=None, protocol=None, **kwargs) -> None:
-        super(StorageServiceProperties, self).__init__(**kwargs)
-        self.hour_metrics = hour_metrics
-        self.minute_metrics = minute_metrics
-        self.cors = cors
-        self.protocol = protocol
diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/__init__.py
deleted file mode 100644
index 65680c9..0000000
--- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_directory_operations.py deleted file mode 100644 index c38bc8d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,672 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "directory". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "directory" - - def create(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs): - """Creates a new directory under the specified share or parent directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. 
- :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': 
self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}/{directory}'} - - def get_properties(self, sharesnapshot=None, timeout=None, cls=None, **kwargs): - """Returns all system properties for the specified directory, and can also - be used to check the existence of a directory. The data returned does - not include the files in the directory or any subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', 
response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}'} - - def delete(self, timeout=None, cls=None, **kwargs): - """Removes the specified empty directory. Note that the directory must be - empty before it can be deleted. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}'} - - def set_properties(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_permission="inherit", file_permission_key=None, cls=None, **kwargs): - """Sets properties on the directory. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. 
This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': 
self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/{shareName}/{directory}'} - - def set_metadata(self, timeout=None, metadata=None, cls=None, **kwargs): - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}'} - - def list_files_and_directories_segment(self, prefix=None, sharesnapshot=None, marker=None, maxresults=None, timeout=None, cls=None, **kwargs): - """Returns a list of files or directories under the specified share or - directory. It lists the contents only for a single level of the - directory hierarchy. 
- - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListFilesAndDirectoriesSegmentResponse or the result of - cls(response) - :rtype: - ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', 
response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} - - def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, recursive=None, cls=None, **kwargs): - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. - :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} - - def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, recursive=None, cls=None, **kwargs): - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterisk (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory - specified in the URI, its files, its subdirectories and their files. 
- :type recursive: bool - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_file_operations.py deleted file mode 100644 index 48df932..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_file_operations.py +++ /dev/null @@ -1,1670 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. 
import models - - -class FileOperations(object): - """FileOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar x_ms_type: Dummy constant parameter, file type can only be file. Constant value: "file". - :ivar x_ms_copy_action: . Constant value: "abort". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.x_ms_type = "file" - self.x_ms_copy_action = "abort" - - def create(self, file_content_length, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): - """Creates a new file or replaces a file. Note it only initializes the - file with no content. - - :param file_content_length: Specifies the maximum size for the file, - up to 4 TB. - :type file_content_length: long - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("self.x_ms_type", self.x_ms_type, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not 
None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def download(self, timeout=None, range=None, range_get_content_md5=None, lease_access_conditions=None, cls=None, **kwargs): - """Reads or downloads a file from the system, including its metadata and - properties. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and - specified together with the Range header, the service returns the MD5 - hash for the range, as long as the range is less than or equal to 4 MB - in size. 
- :type range_get_content_md5: bool - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: object or the result of cls(response) - :rtype: Generator - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.download.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 
'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - if response.status_code == 206: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', 
response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-content-md5')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """Returns all user-defined metadata, standard HTTP properties, and system - properties for the file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'x-ms-type': self._deserialize('str', response.headers.get('x-ms-type')), - 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), - 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), - 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), - 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), - 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), - 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, 
response.headers.get('x-ms-copy-status')), - 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def delete(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def set_http_headers(self, file_attributes="none", file_creation_time="now", file_last_write_time="now", timeout=None, file_content_length=None, file_permission="inherit", file_permission_key=None, file_http_headers=None, lease_access_conditions=None, cls=None, **kwargs): - """Sets HTTP headers on the file. - - :param file_attributes: If specified, the provided file attributes - shall be set. Default value: ‘Archive’ for file and ‘Directory’ for - directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. - Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. - Default value: Now. - :type file_last_write_time: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If - the specified byte value is less than the current size of the file, - then all ranges above the specified byte value are cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param file_http_headers: Additional parameters for the operation - :type file_http_headers: - ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_content_type = None - if file_http_headers is not None: - file_content_type = file_http_headers.file_content_type - file_content_encoding = None - if file_http_headers is not None: - file_content_encoding = file_http_headers.file_content_encoding - file_content_language = None - if file_http_headers is not None: - file_content_language = file_http_headers.file_content_language - file_cache_control = None - if file_http_headers is not None: - file_cache_control = file_http_headers.file_cache_control - file_content_md5 = None - if file_http_headers is not None: - file_content_md5 = file_http_headers.file_content_md5 - file_content_disposition = None - if file_http_headers is not None: - file_content_disposition = file_http_headers.file_content_disposition - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "properties" - - # Construct URL - url = self.set_http_headers.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", file_content_type, 'str') - if file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", file_content_encoding, 'str') - if file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", file_content_language, 'str') - if file_cache_control is not None: - header_parameters['x-ms-cache-control'] = 
self._serialize.header("file_cache_control", file_cache_control, 'str') - if file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", file_content_md5, 'bytearray') - if file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", file_content_disposition, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-file-attributes': self._deserialize('str', response.headers.get('x-ms-file-attributes')), - 'x-ms-file-creation-time': self._deserialize('str', response.headers.get('x-ms-file-creation-time')), - 'x-ms-file-last-write-time': self._deserialize('str', response.headers.get('x-ms-file-last-write-time')), - 'x-ms-file-change-time': self._deserialize('str', response.headers.get('x-ms-file-change-time')), - 'x-ms-file-id': self._deserialize('str', response.headers.get('x-ms-file-id')), - 'x-ms-file-parent-id': self._deserialize('str', response.headers.get('x-ms-file-parent-id')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, cls=None, **kwargs): - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def release_lease(self, lease_id, timeout=None, request_id=None, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, request_id=None, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def break_lease(self, timeout=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): - """[Update] The Lease File operation establishes and manages a lock on a - file for write and delete operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def upload_range(self, range, content_length, file_range_write="update", optionalbody=None, timeout=None, content_md5=None, lease_access_conditions=None, cls=None, **kwargs): - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the - start and end of the range must be specified. For an update operation, - the range can be up to 4 MB in size. For a clear operation, the range - can be up to the value of the file's full size. The File service - accepts only a single byte range for the Range and 'x-ms-range' - headers, and the byte range must be specified in the following format: - bytes=startByte-endByte. 
- :type range: str - :param file_range_write: Specify one of the following options: - - Update: Writes the bytes specified by the request body into the - specified range. The Range and Content-Length headers must match to - perform the update. - Clear: Clears the specified range and releases - the space used in storage for that range. To clear a range, set the - Content-Length header to zero, and set the Range header to a value - that indicates the range to clear, up to maximum file size. Possible - values include: 'update', 'clear' - :type file_range_write: str or - ~azure.storage.fileshare.models.FileRangeWriteType - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param optionalbody: Initial data. - :type optionalbody: Generator - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param content_md5: An MD5 hash of the content. This hash is used to - verify the integrity of the data during transport. When the - Content-MD5 header is specified, the File service compares the hash of - the content that has arrived with the header value that was sent. If - the two hashes do not match, the operation will fail with error code - 400 (Bad Request). - :type content_md5: bytearray - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "range" - - # Construct URL - url = self.upload_range.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'FileRangeWriteType') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, stream_content=optionalbody) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def upload_range_from_url(self, range, copy_source, content_length, timeout=None, source_range=None, source_content_crc64=None, source_modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): - """Upload a range of bytes to a file where the contents are read from a - URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted - in the request body. When the x-ms-write header is set to clear, the - value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the - range of bytes that must be read from the copy source. 
- :type source_content_crc64: bytearray - :param source_modified_access_conditions: Additional parameters for - the operation - :type source_modified_access_conditions: - ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - source_if_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - source_if_none_match_crc64 = None - if source_modified_access_conditions is not None: - source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "range" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", source_if_match_crc64, 'bytearray') - if source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", source_if_none_match_crc64, 'bytearray') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': 
self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def get_range_list(self, sharesnapshot=None, prevsharesnapshot=None, timeout=None, range=None, lease_access_conditions=None, cls=None, **kwargs): - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param prevsharesnapshot: The previous snapshot parameter is an opaque - DateTime value that, when present, specifies the previous snapshot. - :type prevsharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, - inclusively. - :type range: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: ShareFileRangeList or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareFileRangeList - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "rangelist" - - # Construct URL - url = self.get_range_list.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if prevsharesnapshot is not None: - query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - 
response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ShareFileRangeList', response) - header_dict = { - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'x-ms-content-length': self._deserialize('long', response.headers.get('x-ms-content-length')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def start_copy(self, copy_source, timeout=None, metadata=None, file_permission="inherit", file_permission_key=None, copy_file_smb_info=None, lease_access_conditions=None, cls=None, **kwargs): - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up - to 2 KB in length. To copy a file to another file within the same - storage account, you may use Shared Key to authenticate the source - file. If you are copying a file from another storage account, or if - you are copying a blob from the same storage account or another - storage account, then you must authenticate the source file or blob - using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a - share snapshot can also be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type file_permission_key: str - :param copy_file_smb_info: Additional parameters for the operation - :type copy_file_smb_info: - ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - file_permission_copy_mode = None - if copy_file_smb_info is not None: - file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - ignore_read_only = None - if copy_file_smb_info is not None: - ignore_read_only = copy_file_smb_info.ignore_read_only - file_attributes = None - if copy_file_smb_info is not None: - file_attributes = copy_file_smb_info.file_attributes - file_creation_time = None - if copy_file_smb_info is not None: - file_creation_time = copy_file_smb_info.file_creation_time - file_last_write_time = None - if copy_file_smb_info is not None: - file_last_write_time = copy_file_smb_info.file_last_write_time - set_archive_attribute = None - if copy_file_smb_info is not None: - set_archive_attribute = copy_file_smb_info.set_archive_attribute - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.start_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", file_permission_copy_mode, 'PermissionCopyModeType') - if ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') - if file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - if file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - if file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", 
set_archive_attribute, 'bool') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), - 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def abort_copy(self, copy_id, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """Aborts a pending Copy File operation, and leaves a destination file - with zero length and full metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id - header of the original Copy File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "copy" - - # Construct URL - url = self.abort_copy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-copy-action'] = self._serialize.header("self.x_ms_copy_action", self.x_ms_copy_action, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def list_handles(self, marker=None, maxresults=None, timeout=None, sharesnapshot=None, cls=None, **kwargs): - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. 
- :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListHandlesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "listhandles" - - # Construct URL - url = self.list_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListHandlesResponse', response) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} - - def force_close_handles(self, handle_id, timeout=None, marker=None, sharesnapshot=None, cls=None, **kwargs): - """Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory - to be closed. Asterisk (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. 
- :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "forceclosehandles" - - # Construct URL - url = self.force_close_handles.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-marker': self._deserialize('str', response.headers.get('x-ms-marker')), - 'x-ms-number-of-handles-closed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')), - 'x-ms-number-of-handles-failed': self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_service_operations.py deleted file mode 100644 index cd43e83..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_service_operations.py +++ /dev/null @@ -1,253 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "service". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "service" - - def set_properties(self, storage_service_properties, timeout=None, cls=None, **kwargs): - """Sets properties for a storage account's File service endpoint, - including properties for Storage Analytics metrics and CORS - (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.set_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - def get_properties(self, timeout=None, cls=None, **kwargs): - """Gets the properties of a 
storage account's File service, including - properties for Storage Analytics metrics and CORS (Cross-Origin - Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "properties" - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, cls=None, **kwargs): - """The List Shares Segment operation returns a list of the shares and - share snapshots under the specified account. - - :param prefix: Filters the results to return only entries whose name - begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - to be returned with the next list operation. The operation returns a - marker value within the response body if the list returned was not - complete. The marker value may then be used in a subsequent call to - request the next set of list items. The marker value is opaque to the - client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. - If the request does not specify maxresults, or specifies a value - greater than 5,000, the server will return up to 5,000 items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets - to include in the response. 
- :type include: list[str or - ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListSharesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "list" - - # Construct URL - url = self.list_shares_segment.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListSharesResponse', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_shares_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_share_operations.py deleted file mode 100644 index 5e1bbb6..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/operations/_share_operations.py +++ /dev/null @@ -1,1315 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from azure.core.exceptions import map_error - -from .. import models - - -class ShareOperations(object): - """ShareOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar restype: . Constant value: "share". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - - self._config = config - self.restype = "share" - - def create(self, timeout=None, metadata=None, quota=None, cls=None, **kwargs): - """Creates a new share under the specified account. If the share with the - same name already exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - 
create.metadata = {'url': '/{shareName}'} - - def get_properties(self, sharesnapshot=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """Returns all user-defined metadata and system properties for the - specified share or share snapshot. The data returned does not include - the share's list of files. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.get_properties.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-share-quota': self._deserialize('int', response.headers.get('x-ms-share-quota')), - 'x-ms-share-provisioned-iops': self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')), - 'x-ms-share-provisioned-ingress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')), - 'x-ms-share-provisioned-egress-mbps': self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')), - 'x-ms-share-next-allowed-quota-downgrade-time': 
self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')), - 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), - 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), - 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_properties.metadata = {'url': '/{shareName}'} - - def delete(self, sharesnapshot=None, timeout=None, delete_snapshots=None, lease_access_conditions=None, cls=None, **kwargs): - """Operation marks the specified share or share snapshot for deletion. The - share or share snapshot and any files contained within it are later - deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the - base share and all of its snapshots. Possible values include: - 'include' - :type delete_snapshots: str or - ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - 
response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{shareName}'} - - def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or - negative one (-1) for a lease that never expires. A non-infinite lease - can be between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "acquire" - - # Construct URL - url = self.acquire_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, 
header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - acquire_lease.metadata = {'url': '/{shareName}'} - - def release_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "release" - - # Construct URL - url = self.release_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - release_lease.metadata = {'url': '/{shareName}'} - - def change_lease(self, lease_id, timeout=None, proposed_lease_id=None, sharesnapshot=None, request_id=None, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. - The File service returns 400 (Invalid request) if the proposed lease - ID is not in the correct format. See Guid Constructor (String) for a - list of valid GUID string formats. 
- :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "change" - - # Construct URL - url = self.change_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - change_lease.metadata = {'url': '/{shareName}'} - - def renew_lease(self, lease_id, timeout=None, sharesnapshot=None, request_id=None, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the specified snapshot for set and delete share operations. 
- - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "lease" - action = "renew" - - # Construct URL - url = self.renew_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - renew_lease.metadata = {'url': '/{shareName}'} - - def break_lease(self, timeout=None, break_period=None, request_id=None, sharesnapshot=None, lease_access_conditions=None, cls=None, **kwargs): - """The Lease Share operation establishes and manages a lock on a share, or - the 
specified snapshot for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param break_period: For a break operation, proposed duration the - lease should continue before it is broken, in seconds, between 0 and - 60. This break period is only used if it is shorter than the time - remaining on the lease. If longer, the time remaining on the lease is - used. A new lease will not be available before the break period has - expired, but the lease may be held for longer than the break period. - If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, - and an infinite lease breaks immediately. - :type break_period: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime - value that, when present, specifies the share snapshot to query. - :type sharesnapshot: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "lease" - action = "break" - - # Construct URL - url = self.break_lease.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - - # Construct headers - header_parameters = {} - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - 
response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-lease-time': self._deserialize('int', response.headers.get('x-ms-lease-time')), - 'x-ms-lease-id': self._deserialize('str', response.headers.get('x-ms-lease-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - break_lease.metadata = {'url': '/{shareName}'} - - def create_snapshot(self, timeout=None, metadata=None, cls=None, **kwargs): - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. - :type metadata: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "snapshot" - - # Construct URL - url = self.create_snapshot.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-snapshot': self._deserialize('str', response.headers.get('x-ms-snapshot')), - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_snapshot.metadata = {'url': '/{shareName}'} - - def 
create_permission(self, share_permission, timeout=None, cls=None, **kwargs): - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the - share level. - :type share_permission: - ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.create_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct body - body_content = self._serialize.body(share_permission, 'SharePermission', is_xml=False) - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-file-permission-key': self._deserialize('str', response.headers.get('x-ms-file-permission-key')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create_permission.metadata = {'url': '/{shareName}'} - - def get_permission(self, file_permission_key, timeout=None, cls=None, **kwargs): - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the - directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param callable cls: A custom type or function that will be passed the - direct response - :return: SharePermission or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "filepermission" - - # Construct URL - url = self.get_permission.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('SharePermission', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} - - def set_quota(self, timeout=None, quota=None, lease_access_conditions=None, cls=None, **kwargs): - """Sets quota for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. 
- :type quota: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "properties" - - # Construct URL - url = self.set_quota.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_quota.metadata = {'url': '/{shareName}'} - - def set_metadata(self, timeout=None, metadata=None, lease_access_conditions=None, cls=None, **kwargs): - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage - object. 
- :type metadata: str - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "metadata" - - # Construct URL - url = self.set_metadata.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_metadata.metadata = {'url': '/{shareName}'} - - def get_access_policy(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """Returns information about stored access policies specified on the - share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "acl" - - # Construct URL - url = self.get_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[SignedIdentifier]', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} - - def set_access_policy(self, share_acl=None, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """Sets a stored access policy for use with shared access signatures. - - :param share_acl: The ACL for the share. - :type share_acl: - list[~azure.storage.fileshare.models.SignedIdentifier] - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "acl" - - # Construct URL - url = self.set_access_policy.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct body - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) - else: - body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_access_policy.metadata = {'url': '/{shareName}'} - - def get_statistics(self, timeout=None, lease_access_conditions=None, cls=None, **kwargs): - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. 
- :type timeout: int - :param lease_access_conditions: Additional parameters for the - operation - :type lease_access_conditions: - ~azure.storage.fileshare.models.LeaseAccessConditions - :param callable cls: A custom type or function that will be passed the - direct response - :return: ShareStats or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - lease_id = None - if lease_access_conditions is not None: - lease_id = lease_access_conditions.lease_id - - comp = "stats" - - # Construct URL - url = self.get_statistics.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ShareStats', response) - header_dict = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} - - def restore(self, timeout=None, request_id=None, deleted_share_name=None, deleted_share_version=None, cls=None, **kwargs): - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For - more information, see Setting - Timeouts for File Service Operations. - :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param deleted_share_name: Specifies the name of the - preivously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the - preivously-deleted share. 
- :type deleted_share_version: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - comp = "undelete" - - # Construct URL - url = self.restore.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - if cls: - response_headers = { - 'ETag': self._deserialize('str', response.headers.get('ETag')), - 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - restore.metadata = {'url': '/{shareName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/version.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/version.py deleted file mode 100644 index 6ef707d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/version.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -VERSION = "2020-02-10" - diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_lease.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_lease.py deleted file mode 100644 index 789e147..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_lease.py +++ /dev/null @@ -1,237 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated.models import StorageErrorException -from ._generated.operations import FileOperations, ShareOperations - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - ShareClient = TypeVar("ShareClient") - - -class ShareLeaseClient(object): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareClient or ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file or share to lease. - :type client: ~azure.storage.fileshare.ShareFileClient or - ~azure.storage.fileshare.ShareClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[ShareFileClient, ShareClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'file_name'): - self._client = client._client.file # type: ignore # pylint: disable=protected-access - self._snapshot = None - elif hasattr(client, 'share_name'): - self._client = client._client.share - self._snapshot = client.snapshot - else: - raise TypeError("Lease must use ShareFileClient or ShareClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, **kwargs): - # type: (**Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file or share for write and delete operations. If the file or share does not have an active lease, - the File or Share service creates a lease on the file or share. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file or share does not have an active lease, the File or Share service creates a - lease on the file and returns a new lease ID. 
- - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be - between 15 and 60 seconds. A share lease duration cannot be changed - using renew or change. Default is -1 (infinite share lease). - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - lease_duration = kwargs.pop('lease_duration', -1) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the share lease. - - The share lease can be renewed if the lease ID specified in the - lease client matches that associated with the share. Note that - the lease may be renewed even if it has expired as long as the share - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - if isinstance(self._client, FileOperations): - raise TypeError("Lease renewal operations are only valid for ShareClient.") - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - sharesnapshot=self._snapshot, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the share or file. Releasing the lease allows another client to immediately acquire - the lease for the share or file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File or Share service will raise an error - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, **kwargs): - # type: (Any) -> int - """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int lease_break_period: - This is the proposed duration of seconds that the share lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the share lease. If longer, the time remaining on the share lease is used. - A new share lease will not be available before the break period has - expired, but the share lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration share lease breaks after the remaining share lease - period elapses, and an infinite share lease breaks immediately. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - try: - lease_break_period = kwargs.pop('lease_break_period', None) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - if isinstance(self._client, ShareOperations): - kwargs['break_period'] = lease_break_period - if isinstance(self._client, FileOperations) and lease_break_period: - raise TypeError("Setting a lease break period is only applicable to Share leases.") - - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_models.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_models.py deleted file mode 100644 index 5a96c48..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_models.py +++ /dev/null @@ -1,965 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
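As a rough usage sketch for the ``ShareLeaseClient`` removed above (account URL, credential, and file path are placeholders; the import assumes the equivalent upstream package)::

    from azure.storage.fileshare import ShareFileClient, ShareLeaseClient

    file_client = ShareFileClient(
        account_url="https://<account>.file.core.windows.net",
        share_name="myshare",
        file_path="dir/report.txt",
        credential="<account-key-or-sas>",
    )

    # Acquire a lease on the file (file leases are always infinite),
    # pass it to write operations, then release it.
    lease = ShareLeaseClient(file_client)
    lease.acquire()
    try:
        file_client.upload_file(b"new contents", lease=lease)
    finally:
        lease.release()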
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.paging import PageIterator -from ._parser import _parse_datetime_from_str -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import StorageErrorException -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import ShareProtocolSettings as GeneratedShareProtocolSettings -from ._generated.models import ShareSmbSettings as GeneratedShareSmbSettings -from ._generated.models import SmbMultichannel as GeneratedSmbMultichannel -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import DirectoryItem - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for files. - - All required parameters must be populated in order to send to Azure. - - :keyword str version: The version of Storage Analytics to configure. - :keyword bool enabled: Required. Indicates whether metrics are enabled for the - File service. - :keyword bool include_ap_is: Indicates whether metrics should generate summary - statistics for called API operations. - :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should - persist. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param bool enabled: Required. Indicates whether a retention policy is enabled - for the storage service. - :param int days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. - """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. 
Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ShareSmbSettings(GeneratedShareSmbSettings): - """ Settings for the SMB protocol. - - :param SmbMultichannel multichannel: Required. Sets the multichannel settings. - """ - def __init__(self, multichannel): - self.multichannel = multichannel - - -class SmbMultichannel(GeneratedSmbMultichannel): - """ Settings for Multichannel. - - :param bool enabled: Required. If SMB Multichannel is enabled. - """ - def __init__(self, enabled): - self.enabled = enabled - - -class ShareProtocolSettings(GeneratedShareProtocolSettings): - """Protocol Settings class used by the set and get service properties methods in the share service. - - Contains protocol properties of the share service such as the SMB setting of the share service. - - :param SmbSettings smb: Required. Sets SMB settings. - """ - def __init__(self, smb): - self.smb = smb - - @classmethod - def _from_generated(cls, generated): - return cls( - smb=generated.smb) - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. 
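The ``Metrics``, ``RetentionPolicy``, and ``CorsRule`` wrappers above feed the service-level properties call. A hedged sketch, with endpoint and credential as placeholders::

    from azure.storage.fileshare import (
        ShareServiceClient, Metrics, RetentionPolicy, CorsRule,
    )

    service = ShareServiceClient(
        account_url="https://<account>.file.core.windows.net",
        credential="<account-key>",
    )

    # Keep hourly metrics for five days and allow simple cross-origin GETs.
    hour_metrics = Metrics(
        enabled=True,
        include_apis=True,
        retention_policy=RetentionPolicy(enabled=True, days=5),
    )
    cors = [CorsRule(["https://contoso.com"], ["GET", "HEAD"], max_age_in_seconds=60)]

    service.set_service_properties(hour_metrics=hour_metrics, cors=cors)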
Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.fileshare.FileSasPermissions or - ~azure.storage.fileshare.ShareSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class LeaseProperties(DictMixin): - """File Lease Properties. - - :ivar str status: - The lease status of the file or share. Possible values: locked|unlocked - :ivar str state: - Lease state of the file or share. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file or share is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """Used to store the content settings of a file. - - :param str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. 
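``AccessPolicy`` pairs with a stored-access-policy identifier when setting a share ACL. A minimal sketch; the share name and policy id are illustrative::

    from datetime import datetime, timedelta, timezone
    from azure.storage.fileshare import ShareClient, AccessPolicy, ShareSasPermissions

    share = ShareClient(
        account_url="https://<account>.file.core.windows.net",
        share_name="myshare",
        credential="<account-key>",
    )

    # One stored access policy (read + list, valid for a day); SAS tokens can
    # then reference the policy id instead of embedding permissions and expiry.
    policy = AccessPolicy(
        permission=ShareSasPermissions(read=True, list=True),
        expiry=datetime.now(timezone.utc) + timedelta(days=1),
        start=datetime.now(timezone.utc),
    )
    share.set_share_access_policy(signed_identifiers={"read-only-policy": policy})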
- :param str content_language: - If the content_language has previously been set - for the file, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :param str content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class ShareProperties(DictMixin): - """Share's properties class. - - :ivar str name: - The name of the share. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the share was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int quota: - The allocated quota. - :ivar dict metadata: A dict with name_value pairs to associate with the - share as metadata. - :ivar str snapshot: - Snapshot of the share. - :ivar bool deleted: - To indicate if this share is deleted or not. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar datetime deleted: - To indicate the deleted time of the deleted share. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar str version: - To indicate the version of deleted share. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar int remaining_retention_days: - To indicate how many remaining days the deleted share will be kept. - This is a service returned value, and the value will be set when list shared including deleted ones. 
- """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.quota = kwargs.get('x-ms-share-quota') - self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') - self.metadata = kwargs.get('metadata') - self.snapshot = None - self.deleted = None - self.deleted_time = None - self.version = None - self.remaining_retention_days = None - self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') - self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') - self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') - self.lease = LeaseProperties(**kwargs) - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.quota = generated.properties.quota - props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time - props.metadata = generated.metadata - props.snapshot = generated.snapshot - props.deleted = generated.deleted - props.deleted_time = generated.properties.deleted_time - props.version = generated.version - props.remaining_retention_days = generated.properties.remaining_retention_days - props.provisioned_egress_mbps = generated.properties.provisioned_egress_mbps - props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_mbps - props.provisioned_iops = generated.properties.provisioned_iops - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - return props - - -class SharePropertiesPaged(PageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - prefix=self.prefix, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class Handle(DictMixin): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :keyword str handle_id: Required. XSMB service handle ID - :keyword str path: Required. File or directory name including full path starting - from share root - :keyword str file_id: Required. FileId uniquely identifies the file or - directory. - :keyword str parent_id: ParentId uniquely identifies the parent directory of the - object. - :keyword str session_id: Required. SMB session ID in context of which the file - handle was opened - :keyword str client_ip: Required. Client IP that opened the handle - :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('handle_id') - self.path = kwargs.get('path') - self.file_id = kwargs.get('file_id') - self.parent_id = kwargs.get('parent_id') - self.session_id = kwargs.get('session_id') - self.client_ip = kwargs.get('client_ip') - self.open_time = kwargs.get('open_time') - self.last_reconnect_time = kwargs.get('last_reconnect_time') - - @classmethod - def _from_generated(cls, generated): - handle = cls() - handle.id = generated.handle_id - handle.path = generated.path - handle.file_id = generated.file_id - handle.parent_id = generated.parent_id - handle.session_id = generated.session_id - handle.client_ip = generated.client_ip - handle.open_time = generated.open_time - handle.last_reconnect_time = generated.last_reconnect_time - return handle - - -class HandlesPaged(PageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryProperties(DictMixin): - """Directory's properties class. - - :ivar str name: - The name of the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool server_encrypted: - Whether encryption is enabled. - :keyword dict metadata: A dict with name_value pairs to associate with the - directory as metadata. - :ivar change_time: Change time for the file. - :vartype change_time: str or ~datetime.datetime - :ivar creation_time: Creation time for the file. - :vartype creation_time: str or ~datetime.datetime - :ivar last_write_time: Last write time for the file. - :vartype last_write_time: str or ~datetime.datetime - :ivar file_attributes: - The file system attributes for files and directories. - :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :ivar permission_key: Key of the permission to be set for the - directory/file. - :vartype permission_key: str - :ivar file_id: Required. FileId uniquely identifies the file or - directory. - :vartype file_id: str - :ivar parent_id: ParentId uniquely identifies the parent directory of the - object. 
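``Handle`` and ``HandlesPaged`` back the ``list_handles`` calls on the directory and file clients; for example (names are placeholders)::

    from azure.storage.fileshare import ShareDirectoryClient

    directory = ShareDirectoryClient(
        account_url="https://<account>.file.core.windows.net",
        share_name="myshare",
        directory_path="data",
        credential="<account-key>",
    )

    # Enumerate open SMB handles under the directory tree.
    for handle in directory.list_handles(recursive=True):
        print(handle.id, handle.client_ip, handle.path)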
- :vartype parent_id: str - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.metadata = kwargs.get('metadata') - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.server_encrypted = generated.properties.server_encrypted - props.metadata = generated.metadata - return props - - -class DirectoryPropertiesPaged(PageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] - self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) - return self._response.next_marker or None, self.current_page - - -class FileProperties(DictMixin): - """File's properties class. - - :ivar str name: - The name of the file. - :ivar str path: - The path of the file. - :ivar str share: - The name of share. - :ivar str snapshot: - File snapshot. - :ivar int content_length: - Size of file in bytes. - :ivar dict metadata: A dict with name_value pairs to associate with the - file as metadata. - :ivar str file_type: - Type of the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - Size of file in bytes. - :ivar str content_range: - The range of bytes. - :ivar bool server_encrypted: - Whether encryption is enabled. - :ivar copy: - The copy properties. - :vartype copy: ~azure.storage.fileshare.CopyProperties - :ivar content_settings: - The content settings for the file. 
- :vartype content_settings: ~azure.storage.fileshare.ContentSettings - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.path = None - self.share = None - self.snapshot = None - self.content_length = kwargs.get('Content-Length') - self.metadata = kwargs.get('metadata') - self.file_type = kwargs.get('x-ms-type') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.content_length = generated.properties.content_length - props.metadata = generated.properties.metadata - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - return props - - -class CopyProperties(DictMixin): - """File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation. - :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source file to a destination file. 
- The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar datetime destination_snapshot: - Included if the file is incremental copy or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this file. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with - generating shared access signature operations. - - :param bool read: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :param bool create: - Create a new file or copy a file to a new file. - :param bool write: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - :param bool delete: - Delete the file. - """ - def __init__(self, read=False, create=False, write=False, delete=False): - self.read = read - self.create = create - self.write = write - self.delete = delete - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - create permissions, you would provide a string "rc". - - :param str permission: The string which dictates the read, create, - write, or delete permissions - :return: A FileSasPermissions object - :rtype: ~azure.storage.fileshare.FileSasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - - parsed = cls(p_read, p_create, p_write, p_delete) - - return parsed - - -class ShareSasPermissions(object): - """ShareSasPermissions class to be used to be used with - generating shared access signature and access policy operations. - - :param bool read: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :param bool write: - For any file in the share, create or write content, properties or metadata. - Resize the file. 
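``CopyProperties`` above is populated from the ``x-ms-copy-*`` response headers; a sketch of how it shows up after a server-side copy (URLs and paths are placeholders)::

    from azure.storage.fileshare import ShareFileClient

    dest = ShareFileClient(
        account_url="https://<account>.file.core.windows.net",
        share_name="myshare",
        file_path="backups/report.txt",
        credential="<account-key>",
    )

    # Start a server-side copy, then poll the copy state via file properties.
    dest.start_copy_from_url("https://<account>.file.core.windows.net/src/report.txt")
    props = dest.get_file_properties()
    print(props.copy.status, props.copy.progress)  # e.g. 'pending', '512/1024'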
Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. - :param bool delete: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :param bool list: - List files and directories in the share. - """ - def __init__(self, read=False, write=False, delete=False, list=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ShareSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, - delete, or list permissions - :return: A ShareSasPermissions object - :rtype: ~azure.storage.fileshare.ShareSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - - parsed = cls(p_read, p_write, p_delete, p_list) - - return parsed - -class NTFSAttributes(object): - """ - Valid set of attributes to set for file or directory. - To set attribute for directory, 'Directory' should always be enabled except setting 'None' for directory. - - :ivar bool read_only: - Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE - :ivar bool hidden: - Enable/disable 'Hidden' attribute for DIRECTORY or FILE - :ivar bool system: - Enable/disable 'System' attribute for DIRECTORY or FILE - :ivar bool none: - Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY - :ivar bool directory: - Enable/disable 'Directory' attribute for DIRECTORY - :ivar bool archive: - Enable/disable 'Archive' attribute for DIRECTORY or FILE - :ivar bool temporary: - Enable/disable 'Temporary' attribute for FILE - :ivar bool offline: - Enable/disable 'Offline' attribute for DIRECTORY or FILE - :ivar bool not_content_indexed: - Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE - :ivar bool no_scrub_data: - Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE - """ - def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False, - temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False): - - self.read_only = read_only - self.hidden = hidden - self.system = system - self.none = none - self.directory = directory - self.archive = archive - self.temporary = temporary - self.offline = offline - self.not_content_indexed = not_content_indexed - self.no_scrub_data = no_scrub_data - self._str = (('ReadOnly|' if self.read_only else '') + - ('Hidden|' if self.hidden else '') + - ('System|' if self.system else '') + - ('None|' if self.none else '') + - ('Directory|' if self.directory else '') + - ('Archive|' if self.archive else '') + - ('Temporary|' if self.temporary else '') + - ('Offline|' if self.offline else '') + - ('NotContentIndexed|' if self.not_content_indexed else '') + - ('NoScrubData|' if self.no_scrub_data else '')) - - def __str__(self): - 
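``FileSasPermissions`` and ``ShareSasPermissions`` plug into SAS generation; for instance, a share-level token granting read and list (account name and key are placeholders)::

    from datetime import datetime, timedelta, timezone
    from azure.storage.fileshare import generate_share_sas, ShareSasPermissions

    sas = generate_share_sas(
        account_name="<account>",
        share_name="myshare",
        account_key="<account-key>",
        permission=ShareSasPermissions.from_string("rl"),  # read + list
        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    )
    share_url = "https://<account>.file.core.windows.net/myshare?" + sas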
concatenated_params = self._str - return concatenated_params.strip('|') - - @classmethod - def from_string(cls, string): - """Create a NTFSAttributes from a string. - - To specify permissions you can pass in a string with the - desired permissions, e.g. "ReadOnly|Hidden|System" - - :param str string: The string which dictates the permissions. - :return: A NTFSAttributes object - :rtype: ~azure.storage.fileshare.NTFSAttributes - """ - read_only = "ReadOnly" in string - hidden = "Hidden" in string - system = "System" in string - none = "None" in string - directory = "Directory" in string - archive = "Archive" in string - temporary = "Temporary" in string - offline = "Offline" in string - not_content_indexed = "NotContentIndexed" in string - no_scrub_data = "NoScrubData" in string - - parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed, - no_scrub_data) - parsed._str = string # pylint: disable = protected-access - return parsed - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. - """ - return { - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'protocol': ShareProtocolSettings._from_generated(generated.protocol), # pylint: disable=protected-access - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_parser.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_parser.py deleted file mode 100644 index db7cab5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_parser.py +++ /dev/null @@ -1,42 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import datetime, timedelta - -_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time' -_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. 
file_permission should be <=8KB, else' \ - 'please use file_permission_key' - - -def _get_file_permission(file_permission, file_permission_key, default_permission): - # if file_permission and file_permission_key are both empty, then use the default_permission - # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used - if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024: - raise ValueError(_FILE_PERMISSION_TOO_LONG) - - if not file_permission: - if not file_permission_key: - return default_permission - return None - - if not file_permission_key: - return file_permission - - raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS) - - -def _parse_datetime_from_str(string_datetime): - if not string_datetime: - return None - dt, _, us = string_datetime.partition(".") - dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") - us = int(us[:-2]) # microseconds - datetime_obj = dt + timedelta(microseconds=us) - return datetime_obj - - -def _datetime_to_str(datetime_obj): - return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z' diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_serialize.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_serialize.py deleted file mode 100644 index fc02b90..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_serialize.py +++ /dev/null @@ -1,113 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from azure.core import MatchConditions - -from ._parser import _datetime_to_str, _get_file_permission -from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-12-12', - '2020-02-10', -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - # TODO: extract this method to shared folder also add some comments, so that share, datalake and blob can use it. 
- if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if etag_param in kwargs: - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_access_conditions(lease): - # type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_smb_properties(kwargs): - # type: (Dict[str, Any]) -> Dict[str, Any] - ignore_read_only = kwargs.pop('ignore_read_only', None) - set_archive_attribute = kwargs.pop('set_archive_attribute', None) - file_permission = kwargs.pop('file_permission', None) - file_permission_key = kwargs.pop('permission_key', None) - file_attributes = kwargs.pop('file_attributes', None) - file_creation_time = kwargs.pop('file_creation_time', None) or "" - file_last_write_time = kwargs.pop('file_last_write_time', None) or "" - - file_permission_copy_mode = None - file_permission = _get_file_permission(file_permission, file_permission_key, None) - - if file_permission: - if file_permission.lower() == "source": - file_permission = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - elif file_permission_key: - if file_permission_key.lower() == "source": - file_permission_key = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - return { - 'file_permission': file_permission, - 'file_permission_key': file_permission_key, - 'copy_file_smb_info': CopyFileSmbInfo( - file_permission_copy_mode=file_permission_copy_mode, - ignore_read_only=ignore_read_only, - file_attributes=file_attributes, - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - set_archive_attribute=set_archive_attribute - ) - - } - -def get_api_version(kwargs, default): - # type: (Dict[str, Any]) -> str - api_version = kwargs.pop('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) - return api_version or default diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_client.py deleted file mode 100644 index 111e91c..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_client.py +++ /dev/null @@ -1,806 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from ._generated import AzureFileStorage -from ._generated.version import VERSION -from ._generated.models import ( - StorageErrorException, - SignedIdentifier, - DeleteSnapshotsOptionType, - SharePermission) -from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission -from ._serialize import get_api_version, get_access_conditions -from ._directory_client import ShareDirectoryClient -from ._file_client import ShareFileClient -from ._lease import ShareLeaseClient - -if TYPE_CHECKING: - from ._models import ShareProperties, AccessPolicy - - -class ShareClient(StorageAccountHostsMixin): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. 
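``get_api_version`` and ``_SUPPORTED_API_VERSIONS`` above enforce the ``api_version`` keyword documented on the clients; pinning a service version looks like this (values are illustrative)::

    from azure.storage.fileshare import ShareClient

    # Pin the service version; a value outside the supported list
    # ('2019-02-02' .. '2020-02-10' here) raises ValueError at construction.
    share = ShareClient(
        account_url="https://<account>.file.core.windows.net",
        share_name="myshare",
        credential="<account-key>",
        api_version="2019-07-07",
    )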
- """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - @classmethod - def from_share_url(cls, share_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """ - :param str share_url: The full URI to the share. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - """ - try: - if not share_url.lower().startswith('http'): - share_url = "https://" + share_url - except AttributeError: - raise ValueError("Share URL must be a string.") - parsed_url = urlparse(share_url.rstrip('/')) - if not (parsed_url.path and parsed_url.netloc): - raise ValueError("Invalid URL: {}".format(share_url)) - - share_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(share_path) > 1: - account_path = "/" + "/".join(share_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - - share_name = unquote(share_path[-1]) - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - if not share_name: - raise ValueError("Invalid URL. 
Please provide a URL with a valid share name") - return cls(account_url, share_name, path_snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """Create ShareClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str snapshot: - The optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_client_from_conn_string] - :end-before: [END create_share_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Gets the share client from connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. 
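A minimal sketch of the sub-client accessors documented above. The connection string, share name and file path are placeholders, and the import assumes the versioned package re-exports ShareClient the way azure.storage.fileshare does:

.. code-block:: python

    # Illustrative only: placeholder connection string, share name and paths.
    from azure.multiapi.storagev2.fileshare.v2020_02_10 import ShareClient  # assumed re-export

    conn_str = "<storage connection string>"
    share = ShareClient.from_connection_string(conn_str, share_name="myshare")
    dir_client = share.get_directory_client("logs")       # the directory need not exist yet
    file_client = share.get_file_client("logs/app.log")   # path is relative to the share root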
- :rtype: ~azure.storage.fileshare.ShareFileClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode) - - @distributed_trace - def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the share does not have an active lease, the Share - Service creates a lease on the share and returns a new lease. - - .. versionadded:: 12.6.0 - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Share Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START acquire_and_release_lease_on_share] - :end-before: [END acquire_and_release_lease_on_share] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a share. - """ - kwargs['lease_duration'] = lease_duration - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 8 - :caption: Creates a file share. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. 
- - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 12 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 12 - :caption: Deletes the share and any snapshots. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - delete_snapshots=delete_include, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 12 - :caption: Gets the share properties. 
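Taken together, the lifecycle methods documented above compose as in this sketch, again assuming a placeholder connection string and share name and the same re-exported ShareClient:

.. code-block:: python

    # Illustrative only: placeholder connection string and share name.
    from azure.multiapi.storagev2.fileshare.v2020_02_10 import ShareClient  # assumed re-export

    conn_str = "<storage connection string>"
    share = ShareClient.from_connection_string(conn_str, share_name="myshare")

    share.create_share(metadata={"category": "test"}, quota=1)  # fails if the share already exists
    snapshot = share.create_snapshot()         # dict with the snapshot ID, etag and last-modified
    props = share.get_share_properties()       # ShareProperties; name and snapshot are filled in
    share.delete_share(delete_snapshots=True)  # removes the share and the snapshot taken above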
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - props = self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace - def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 12 - :caption: Sets the share quota. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.set_quota( # type: ignore - timeout=timeout, - quota=quota, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 12 - :caption: Sets the share metadata. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. 
- - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - try: - return self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :return: The approximate size of the data (in bytes) stored on the share. 
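A sketch of the access-policy round trip and the stats call described above. AccessPolicy and ShareSasPermissions are assumed to be re-exported by the package as they are in azure.storage.fileshare, and the identifier name is a placeholder:

.. code-block:: python

    # Illustrative only: placeholder connection string, share and identifier names.
    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.fileshare.v2020_02_10 import (  # assumed re-exports
        AccessPolicy, ShareClient, ShareSasPermissions)

    share = ShareClient.from_connection_string("<storage connection string>", share_name="myshare")
    policy = AccessPolicy(
        permission=ShareSasPermissions(read=True),
        start=datetime.utcnow(),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    share.set_share_access_policy({"read-only": policy})  # at most 5 identifiers per share
    acl = share.get_share_access_policy()                 # {'public_access': ..., 'signed_identifiers': [...]}
    used = share.get_share_stats()                        # approximate size of the share in bytes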
- :rtype: int - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.share.get_statistics( - timeout=timeout, - lease_access_conditions=access_conditions, - **kwargs) - return stats.share_usage_bytes # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 12 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @staticmethod - def _create_permission_for_share_options(file_permission, # type: str - **kwargs): - options = { - 'share_permission': SharePermission(permission=file_permission), - 'cls': deserialize_permission_key, - 'timeout': kwargs.pop('timeout', None), - } - options.update(kwargs) - return options - - @distributed_trace - def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return self._client.share.create_permission(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace - def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - directory = self.get_directory_client(directory_name) - directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_service_client.py deleted file mode 100644 index d6b240e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_share_service_client.py +++ /dev/null @@ -1,415 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, List, - TYPE_CHECKING -) -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.response_handlers import process_storage_error -from ._generated import AzureFileStorage -from ._generated.models import StorageErrorException, StorageServiceProperties -from ._generated.version import VERSION -from ._share_client import ShareClient -from ._serialize import get_api_version -from ._models import ( - SharePropertiesPaged, - service_properties_deserialize, -) - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ( - ShareProperties, - Metrics, - CorsRule, - ShareProtocolSettings - ) - - -class ShareServiceClient(StorageAccountHostsMixin): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. 
- For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File Share service.") - - _, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ShareServiceClient - """Create ShareServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :returns: A File Share service client. - :rtype: ~azure.storage.fileshare.ShareServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client_from_conn_string] - :end-before: [END create_share_service_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Create the share service client with connection string. 
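A sketch of the account-level client construction described above, using a placeholder connection string; list_shares and get_share_client are documented further down in this file:

.. code-block:: python

    # Illustrative only: placeholder connection string.
    from azure.multiapi.storagev2.fileshare.v2020_02_10 import ShareServiceClient  # assumed re-export

    conn_str = "<storage connection string>"
    service = ShareServiceClient.from_connection_string(conn_str)
    for share in service.list_shares(include_metadata=True):  # yields ShareProperties items
        print(share.name)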
- """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 8 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - protocol=None, # type: Optional[ShareProtocolSettings], - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :param protocol: - Sets protocol settings - :type protocol: ~azure.storage.fileshare.ShareProtocolSettings - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. - """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - protocol=protocol - ) - try: - self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ShareProperties] - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. 
- The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 12 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace - def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 8 - :caption: Create a share in the file share service. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace - def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. 
admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 12 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace - def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - """ - share = self.get_share_client(deleted_share_name) - - try: - share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except StorageErrorException as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. - """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, - _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client.py deleted file mode 100644 index 14deea6..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client.py +++ /dev/null @@ -1,437 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, - Optional, - Any, - Iterable, - Dict, - List, - Type, - Tuple, - TYPE_CHECKING, -) -import logging - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, -} - - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except StorageErrorException as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} - except KeyError: - credential = conn_settings.get("SharedAccessSignature") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DefaultEndpointsProtocol"], - conn_settings["AccountName"], - service, - conn_settings["EndpointSuffix"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["AccountName"], service, conn_settings["EndpointSuffix"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - 
conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client_async.py deleted file mode 100644 index d252ad0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/base_client_async.py +++ /dev/null @@ -1,179 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .._generated.models import StorageErrorException -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except StorageErrorException as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/constants.py deleted file mode 100644 index 7fb05b5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/constants.py +++ /dev/null @@ -1,26 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
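# Illustrative sketch (not from the deleted sources above): why AsyncTransportWrapper's
# close()/__aexit__ are deliberate no-ops. A sub-client returned by a get_client() method
# reuses its parent's transport; wrapping it keeps "async with sub_client:" from closing
# the shared session. _DummyTransport and _Wrapper are local stand-ins for illustration only.
import asyncio

class _DummyTransport:
    closed = False
    async def close(self):
        self.closed = True

class _Wrapper:                      # mirrors AsyncTransportWrapper's behaviour
    def __init__(self, transport):
        self._transport = transport
    async def send(self, request, **kwargs):
        return await self._transport.send(request, **kwargs)
    async def close(self):           # deliberately does nothing
        pass

async def _demo():
    shared = _DummyTransport()
    await _Wrapper(shared).close()   # the sub-client "closes" its wrapped transport ...
    print(shared.closed)             # ... but the parent's session stays open -> False

asyncio.run(_demo())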
-# -------------------------------------------------------------------------- - -import sys -from .._generated.version import VERSION - - -X_MS_VERSION = VERSION - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. 
- ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/models.py deleted file mode 100644 index 27cd236..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/models.py +++ /dev/null @@ -1,468 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
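# Illustrative sketch (not from the deleted sources above): the key-encryption-key
# interface that encrypt_blob()/decrypt_blob()/encrypt_queue_message() in the removed
# encryption.py expect (wrap_key, unwrap_key, get_key_wrap_algorithm, get_kid), exercised
# with the same AES-CBC + PKCS7 primitives used by _generate_AES_CBC_cipher. KeyWrapper is
# a local toy KEK, not an Azure Key Vault client; names and key material are illustrative.
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.padding import PKCS7
from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap

class KeyWrapper:
    def __init__(self, kid="local-kek-1"):
        self.kek = os.urandom(32)      # key that wraps the content encryption key
        self.kid = kid
    def wrap_key(self, key):
        return aes_key_wrap(self.kek, key, default_backend())
    def unwrap_key(self, key, algorithm):
        return aes_key_unwrap(self.kek, key, default_backend())
    def get_key_wrap_algorithm(self):
        return "A256KW"
    def get_kid(self):
        return self.kid

# Encrypt side, mirroring encrypt_blob(): random cek/iv, PKCS7 pad, AES-256-CBC.
cek, iv = os.urandom(32), os.urandom(16)
padder = PKCS7(128).padder()
padded = padder.update(b"hello world") + padder.finalize()
encryptor = Cipher(AES(cek), CBC(iv), default_backend()).encryptor()
ciphertext = encryptor.update(padded) + encryptor.finalize()
kek = KeyWrapper()
wrapped_cek = kek.wrap_key(cek)        # stored as 'EncryptedKey' in the metadata dict

# Decrypt side, mirroring _decrypt_message(): unwrap the cek, decrypt, unpad.
cek2 = kek.unwrap_key(wrapped_cek, kek.get_key_wrap_algorithm())
decryptor = Cipher(AES(cek2), CBC(iv), default_backend()).decryptor()
unpadder = PKCS7(128).unpadder()
plaintext = unpadder.update(decryptor.update(ciphertext) + decryptor.finalize()) + unpadder.finalize()
assert plaintext == b"hello world"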
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.fileshare.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.fileshare.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.fileshare.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. 
- - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies.py deleted file mode 100644 index c9bc798..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
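# Illustrative sketch (not from the deleted sources above): the single-letter permission
# convention that AccountSasPermissions.from_string()/__str__() and the related
# ResourceTypes/Services classes in the removed models.py implement. Stand-alone, no SDK
# import; parse_permissions is a hypothetical helper, not part of the package.
_FLAGS = [("r", "read"), ("w", "write"), ("d", "delete"), ("x", "delete_previous_version"),
          ("l", "list"), ("a", "add"), ("c", "create"), ("u", "update"),
          ("p", "process"), ("f", "filter_by_tags"), ("t", "tag")]

def parse_permissions(permission):
    """Return which flags a string such as 'rwl' switches on."""
    return {name: letter in permission for letter, name in _FLAGS}

print(parse_permissions("rwl"))   # read/write/list True, everything else False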
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
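For reference, both removed retry policies derive their sleep interval from ``get_backoff_time``. A minimal standalone sketch of that formula, with default values taken from the deleted constructors (the helper name here is illustrative, not part of the SDK)::

    import random

    def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
        # initial_backoff on the first retry, plus increment_base**count afterwards,
        # randomised within +/- jitter seconds (mirrors ExponentialRetry.get_backoff_time)
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        start = backoff - jitter if backoff > jitter else 0
        return random.uniform(start, backoff + jitter)

    # The defaults give roughly 15s, 18s and 24s for the first three retries.
    for attempt in range(3):
        print(round(exponential_backoff(attempt), 1))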
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/request_handlers.py deleted file mode 100644 index 4f15b65..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/request_handlers.py +++ /dev/null @@ -1,147 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/response_handlers.py deleted file mode 100644 index ac526e5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/response_handlers.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.location_mode, deserialized - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message 
+= "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. 
The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads.py deleted file mode 100644 index abf3fb2..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads.py +++ /dev/null @@ -1,550 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
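The account-SAS signature computed by the removed ``_SharedAccessHelper.add_account_signature`` is an HMAC-SHA256 over a newline-terminated field list. A hedged sketch follows, assuming the shared ``sign_string`` helper base64-decodes the account key and base64-encodes the digest; the field order matches the deleted code, while the function name is ours::

    import base64
    import hashlib
    import hmac

    def account_sas_signature(account_name, account_key, permission, services,
                              resource_types, start, expiry, ip, protocol, x_ms_version):
        # account name followed by each signed field, every one terminated by a newline
        string_to_sign = '\n'.join([
            account_name, permission, services, resource_types,
            start, expiry, ip, protocol, x_ms_version, ''
        ])
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')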
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - 
self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, block_id, block_stream): - try: - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. 
- if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads_async.py deleted file mode 100644 index f6a8725..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared/uploads_async.py +++ /dev/null @@ -1,351 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - return sorted(range_ids) - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, block_id, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, block_id, block_stream): - range_id = await self._upload_substream_block(block_id, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, block_id, block_stream): - try: - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - 
length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared_access_signature.py deleted file mode 100644 index 20dad95..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_shared_access_signature.py +++ /dev/null @@ -1,491 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, List, TYPE_CHECKING -) - -from ._shared import sign_string -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants -from ._shared.parser import _str - -if TYPE_CHECKING: - from datetime import datetime - from .import ( - ResourceTypes, - AccountSasPermissions, - ShareSasPermissions, - FileSasPermissions - ) - -class FileSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating file and share access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, policy_id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _str(directory_name) if directory_name is not None else None - resource_path += '/' + _str(file_name) if file_name is not None else None - - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, share_name) - - return sas.get_token() - - -class _FileSharedAccessHelper(_SharedAccessHelper): - - def add_resource_signature(self, account_name, account_key, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/file/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for the file service. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param ~azure.storage.fileshare.AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. 
- :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 8 - :caption: Generate a sas token. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(fileshare=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_share_sas( - account_name, # type: str - share_name, # type: str - account_key, # type: str - permission=None, # type: Optional[Union[ShareSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for a share. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. 
- :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - return sas.generate_share( - share_name=share_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_file_sas( - account_name, # type: str - share_name, # type: str - file_path, # type: List[str] - account_key, # type: str - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param file_path: - The file path represented as a list of path segments, including the file name. - :type file_path: List[str] - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. 
- If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - if len(file_path) > 1: - dir_path = '/'.join(file_path[:-1]) - else: - dir_path = None # type: ignore - return sas.generate_file( # type: ignore - share_name=share_name, - directory_name=dir_path, - file_name=file_path[-1], - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_version.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/_version.py deleted file mode 100644 index 3174c50..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.3.0b1" diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/__init__.py deleted file mode 100644 index 73393b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from ._file_client_async import ShareFileClient -from ._directory_client_async import ShareDirectoryClient -from ._share_client_async import ShareClient -from ._share_service_client_async import ShareServiceClient -from ._lease_async import ShareLeaseClient - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_directory_client_async.py deleted file mode 100644 index 29b6396..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_directory_client_async.py +++ /dev/null @@ -1,593 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _get_file_permission, _datetime_to_str -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import StorageErrorException -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_directory_properties -from .._serialize import get_api_version -from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase -from ._file_client_async import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes - from .._generated.models import HandleItem - - -class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. 
This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareDirectoryClient, self).__init__( - account_url, - share_name=share_name, - directory_path=directory_path, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = loop - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param str file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 16 - :caption: Gets the subdirectory client. 
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - @distributed_trace_async - async def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 16 - :caption: Creates a directory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 16 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - await self._client.directory.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], Any) -> AsyncItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 16 - :caption: List directories and files. 
- """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> AsyncItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace_async - async def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace_async - async def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. 
- :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 16 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace_async - async def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 16 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace_async - async def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 16 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace_async - async def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 16 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_download_async.py deleted file mode 100644 index c0db16d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_download_async.py +++ /dev/null @@ -1,467 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
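# The deleted module above provided the async ShareDirectoryClient convenience
# methods (create_subdirectory, delete_subdirectory, upload_file, delete_file).
# A minimal usage sketch against one of the retained API versions follows; the
# v2019_07_07 module path, connection string, and share/directory names are
# illustrative assumptions.
import asyncio

from azure.multiapi.storagev2.fileshare.v2019_07_07.aio import ShareDirectoryClient


async def main():
    directory = ShareDirectoryClient.from_connection_string(
        conn_str="<connection-string>", share_name="myshare", directory_path="mydir")
    async with directory:
        await directory.create_directory()
        # create_subdirectory returns a ShareDirectoryClient for the new child.
        logs = await directory.create_subdirectory("logs")
        # upload_file returns a ShareFileClient for the uploaded file.
        await directory.upload_file("hello.txt", b"hello world")
        await directory.delete_file("hello.txt")
        await logs.delete_directory()

asyncio.run(main())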
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from azure.core.exceptions import HttpResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in file download stream.""" - - def __init__(self, size, content, downloader): - self.size = size 
-        self._current_content = content
-        self._iter_downloader = downloader
-        self._iter_chunks = None
-        self._complete = (size == 0)
-
-    def __len__(self):
-        return self.size
-
-    def __iter__(self):
-        raise TypeError("Async stream must be iterated asynchronously.")
-
-    def __aiter__(self):
-        return self
-
-    async def __anext__(self):
-        """Iterate through responses."""
-        if self._complete:
-            raise StopAsyncIteration("Download complete")
-        if not self._iter_downloader:
-            # If no iterator was supplied, the download completed with
-            # the initial GET, so we just return that data
-            self._complete = True
-            return self._current_content
-
-        if not self._iter_chunks:
-            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
-        else:
-            try:
-                chunk = next(self._iter_chunks)
-            except StopIteration:
-                raise StopAsyncIteration("Download complete")
-            self._current_content = await self._iter_downloader.yield_chunk(chunk)
-
-        return self._current_content
-
-
-class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
-    """A streaming object to download from Azure Storage.
-
-    :ivar str name:
-        The name of the file being downloaded.
-    :ivar str path:
-        The full path of the file.
-    :ivar str share:
-        The name of the share where the file is.
-    :ivar ~azure.storage.fileshare.FileProperties properties:
-        The properties of the file being downloaded. If only a range of the data is being
-        downloaded, this will be reflected in the properties.
-    :ivar int size:
-        The size of the total data in the stream. This will be the byte range if specified,
-        otherwise the total size of the file.
-    """
-
-    def __init__(
-        self,
-        client=None,
-        config=None,
-        start_range=None,
-        end_range=None,
-        validate_content=None,
-        encryption_options=None,
-        max_concurrency=1,
-        name=None,
-        path=None,
-        share=None,
-        encoding=None,
-        **kwargs
-    ):
-        self.name = name
-        self.path = path
-        self.share = share
-        self.properties = None
-        self.size = None
-
-        self._client = client
-        self._config = config
-        self._start_range = start_range
-        self._end_range = end_range
-        self._max_concurrency = max_concurrency
-        self._encoding = encoding
-        self._validate_content = validate_content
-        self._encryption_options = encryption_options or {}
-        self._request_options = kwargs
-        self._location_mode = None
-        self._download_complete = False
-        self._current_content = None
-        self._file_size = None
-        self._response = None
-
-        # The service only provides transactional MD5s for chunks under 4MB.
-        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
-        # chunk so a transactional MD5 can be retrieved.
- self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. 
- if response.properties.size == self.size: - self._download_complete = True - return response - - def chunks(self): - """Iterate over chunks in the download stream. - - :rtype: Iterable[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader) - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." 
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_file_client_async.py deleted file mode 100644 index d008e1b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_file_client_async.py +++ /dev/null @@ -1,1197 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
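# The _download_async.py module deleted above implemented StorageStreamDownloader,
# the object returned by ShareFileClient.download_file(). A minimal sketch of the
# download surface it exposed (readall, chunks, readinto) follows; the v2019_07_07
# module path and the connection details are illustrative assumptions.
import asyncio
from io import BytesIO

from azure.multiapi.storagev2.fileshare.v2019_07_07.aio import ShareFileClient


async def main():
    file_client = ShareFileClient.from_connection_string(
        conn_str="<connection-string>", share_name="myshare", file_path="mydir/hello.txt")
    async with file_client:
        # Buffer the whole file in memory.
        downloader = await file_client.download_file()
        data = await downloader.readall()

        # Or stream it chunk by chunk.
        parts = []
        downloader = await file_client.download_file()
        async for chunk in downloader.chunks():
            parts.append(chunk)

        # Or copy it into any writable (and, for parallel downloads, seekable) stream.
        stream = BytesIO()
        downloader = await file_client.download_file(max_concurrency=2)
        await downloader.readinto(stream)

asyncio.run(main())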
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method, too-many-public-methods -import functools -import time -from io import BytesIO -from typing import Optional, Union, IO, List, Tuple, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.async_paging import AsyncItemPaged - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _datetime_to_str, _get_file_permission -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import StorageErrorException, FileHTTPHeaders -from .._shared.policies_async import ExponentialRetry -from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.request_handlers import add_metadata_headers, get_length -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result -from .._serialize import get_access_conditions, get_smb_properties, get_api_version -from .._file_client import ShareFileClient as ShareFileClientBase -from ._models import HandlesPaged -from ._lease_async import ShareLeaseClient -from ._download_async import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes - from .._generated.models import HandleItem - - -async def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs -): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = await client.create_file( - size, content_settings=content_settings, metadata=metadata, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - timeout=timeout, - **kwargs - ) - if size == 0: - return response - - responses = await upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except StorageErrorException as error: - process_storage_error(error) - - -class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. 
- :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - - def __init__( # type: ignore - self, - account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareFileClient, self).__init__( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, - credential=credential, loop=loop, **kwargs - ) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def acquire_lease(self, lease_id=None, **kwargs): - # type: (Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the file does not have an active lease, the File - Service creates a lease on the blob and returns a new lease. - - :param str lease_id: - Proposed lease ID, in a GUID string format. The File Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - kwargs['lease_duration'] = -1 - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(**kwargs) - return lease - - @distributed_trace_async - async def create_file( # type: ignore - self, - size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. 
- :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 16 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return await self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword str encoding: - Defaults to UTF-8. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 16 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, "read"): - stream = data - elif hasattr(data, "__iter__"): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return await _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs - ) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. 
This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 16 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return await self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id["copy_id"] - except TypeError: - pass - try: - await self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def download_file( - self, - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (...) -> Iterable[bytes] - """Downloads a file to a stream with automatic chunking. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. 
versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A iterable data generator (stream) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 16 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - downloader = StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs - ) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 16 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = await self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = "/".join(self.file_path) - return file_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop("size", None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.file.set_metadata( # type: ignore - metadata=metadata, lease_access_conditions=access_conditions, - timeout=timeout, cls=return_response_headers, headers=headers, **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range( # type: ignore - self, - data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. 
This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return await self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.file.upload_range_from_url(**options) # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A list of valid ranges. - :rtype: List[dict[str, int]] - """ - options = self._get_ranges_options( - offset=offset, - length=length, - **kwargs) - try: - ranges = await self._client.file.get_range_list(**options) - except StorageErrorException as error: - process_storage_error(error) - return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] - - @distributed_trace_async - async def get_ranges_diff( # type: ignore - self, - previous_sharesnapshot, # type: Union[str, Dict[str, Any]] - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - .. versionadded:: 12.6.0 - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :param str previous_sharesnapshot: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous file snapshot to be compared - against a more recent snapshot or the current file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. - The first element are filled file ranges, the 2nd element is cleared file ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_ranges_options( - offset=offset, - length=length, - previous_sharesnapshot=previous_sharesnapshot, - **kwargs) - try: - ranges = await self._client.file.get_range_list(**options) - except StorageErrorException as error: - process_storage_error(error) - return get_file_ranges_result(ranges) - - @distributed_trace_async - async def clear_range( # type: ignore - self, - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. 
- :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = "bytes={0}-{1}".format(offset, end_range) - try: - return await self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> AsyncItemPaged - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop("results_per_page", None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. 
- :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except StorageErrorException as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_lease_async.py deleted file mode 100644 index 0f6fdb3..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_lease_async.py +++ /dev/null @@ -1,229 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
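For reference, a minimal usage sketch of the range APIs in the async ``ShareFileClient`` deleted above (``upload_range``, ``get_ranges``, ``clear_range``). The connection string, share, and file names are placeholders, and the sketch assumes the ``v2020_02_10`` aio package exposes ``ShareFileClient`` with the usual ``from_connection_string`` factory:

.. code-block:: python

    import asyncio

    from azure.multiapi.storagev2.fileshare.v2020_02_10.aio import ShareFileClient


    async def patch_ranges(conn_str: str) -> None:
        # Placeholder share/file names; the file is assumed to already exist.
        file_client = ShareFileClient.from_connection_string(
            conn_str, share_name="myshare", file_path="mydir/myfile")

        data = b"x" * 512
        # Writes 512 bytes at offset 0; the client builds the inclusive
        # 'bytes=0-511' range value seen in the deleted implementation.
        await file_client.upload_range(data, offset=0, length=len(data))

        # Lists the valid ranges as [{'start': ..., 'end': ...}] dicts.
        print(await file_client.get_ranges())

        # clear_range requires offset and length aligned to 512 bytes.
        await file_client.clear_range(offset=0, length=512)


    asyncio.run(patch_ranges("<connection-string>"))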
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._generated.models import ( - StorageErrorException) -from .._generated.aio.operations_async import FileOperations, ShareOperations -from .._lease import ShareLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - ShareClient = TypeVar("ShareClient") - - -class ShareLeaseClient(LeaseClientBase): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareClient or ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file or share to lease. - :type client: ~azure.storage.fileshare.ShareFileClient or - ~azure.storage.fileshare.ShareClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, **kwargs): - # type: (**Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file or share for write and delete operations. If the file or share does not have an active lease, - the File or Share service creates a lease on the file or share. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file or share does not have an active lease, the File or Share service creates a - lease on the file and returns a new lease ID. - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be - between 15 and 60 seconds. A share lease duration cannot be changed - using renew or change. Default is -1 (infinite share lease). - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - try: - lease_duration = kwargs.pop('lease_duration', -1) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the share lease. - - The share lease can be renewed if the lease ID specified in the - lease client matches that associated with the share. Note that - the lease may be renewed even if it has expired as long as the share - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - if isinstance(self._client, FileOperations): - raise TypeError("Lease renewal operations are only valid for ShareClient.") - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - sharesnapshot=self._snapshot, - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the share or file. Releasing the lease allows another client to immediately acquire - the lease for the share or file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File or Share service raises an error - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, **kwargs): - # type: (Any) -> int - """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int lease_break_period: - This is the proposed duration of seconds that the share lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the share lease. If longer, the time remaining on the share lease is used. - A new share lease will not be available before the break period has - expired, but the share lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration share lease breaks after the remaining share lease - period elapses, and an infinite share lease breaks immediately. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - try: - lease_break_period = kwargs.pop('lease_break_period', None) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - if isinstance(self._client, ShareOperations): - kwargs['break_period'] = lease_break_period - if isinstance(self._client, FileOperations) and lease_break_period: - raise TypeError("Setting a lease break period is only applicable to Share leases.") - - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_models.py deleted file mode 100644 index affee8f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_models.py +++ /dev/null @@ -1,178 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
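For reference, a minimal sketch of driving the share-level lease client deleted above from a ``ShareClient``; the connection string and share name are placeholders:

.. code-block:: python

    import asyncio

    from azure.multiapi.storagev2.fileshare.v2020_02_10.aio import ShareClient


    async def lease_share(conn_str: str) -> None:
        share = ShareClient.from_connection_string(conn_str, share_name="myshare")

        # acquire_lease returns a ShareLeaseClient; -1 requests an infinite lease.
        lease = await share.acquire_lease(lease_duration=-1)
        try:
            # Operations on a leased share must carry the lease (object or ID).
            await share.set_share_metadata({"category": "test"}, lease=lease)
        finally:
            # Releasing lets another client acquire the lease immediately.
            await lease.release()


    asyncio.run(lease_share("<connection-string>"))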
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator - -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error -from .._generated.models import StorageErrorException -from .._generated.models import DirectoryItem -from .._models import Handle, ShareProperties - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class SharePropertiesPaged(AsyncPageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class HandlesPaged(AsyncPageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryPropertiesPaged(AsyncPageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except StorageErrorException as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] - self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) - return self._response.next_marker or None, self.current_page diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_client_async.py deleted file mode 100644 index e4f9b65..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_client_async.py +++ /dev/null @@ -1,664 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.pipeline import AsyncPipeline -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from .._generated.aio import AzureFileStorage -from .._generated.version import VERSION -from .._generated.models import ( - StorageErrorException, - SignedIdentifier, - DeleteSnapshotsOptionType) -from .._deserialize import deserialize_share_properties, deserialize_permission -from .._serialize import get_api_version, get_access_conditions -from .._share_client import ShareClient as ShareClientBase -from ._directory_client_async import ShareDirectoryClient -from ._file_client_async import ShareFileClient -from ..aio._lease_async import ShareLeaseClient - - -if TYPE_CHECKING: - from .._models import ShareProperties, AccessPolicy - - -class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): - """A client to interact with a specific share, although that share may not yet exist. 
- - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareClient, self).__init__( - account_url, - share_name=share_name, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = loop - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. 
- :rtype: ~azure.storage.fileshare.aio.ShareFileClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - @distributed_trace_async() - async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the share does not have an active lease, the Share - Service creates a lease on the share and returns a new lease. - - .. versionadded:: 12.6.0 - - :param int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :param str lease_id: - Proposed lease ID, in a GUID string format. The Share Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START acquire_lease_on_share] - :end-before: [END acquire_lease_on_share] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a share. - """ - kwargs['lease_duration'] = lease_duration - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(**kwargs) - return lease - - @distributed_trace_async - async def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 12 - :caption: Creates a file share. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return await self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. 
Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 16 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 16 - :caption: Deletes the share and any snapshots. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - await self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - delete_snapshots=delete_include, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world_async.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 16 - :caption: Gets the share properties. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - props = await self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace_async - async def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 16 - :caption: Sets the share quota. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.set_quota( # type: ignore - timeout=timeout, - quota=quota, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 16 - :caption: Sets the share metadata. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. 
The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - - try: - return await self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-02-02'. - - :return: The approximate size of the data (in bytes) stored on the share. 
- :rtype: int - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.share.get_statistics( - timeout=timeout, - lease_access_conditions=access_conditions, - **kwargs) - return stats.share_usage_bytes # type: ignore - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( # type: ignore - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 16 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @distributed_trace_async - async def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return await self._client.share.create_permission(**options) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - await directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace_async - async def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - directory = self.get_directory_client(directory_name) - await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_service_client_async.py deleted file mode 100644 index af67dcd..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/aio/_share_service_client_async.py +++ /dev/null @@ -1,368 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
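For reference, a minimal sketch of the share lifecycle covered by the ``ShareClient`` deleted above; the share and directory names are placeholders, and the returned dicts carry the response headers described in the docstrings:

.. code-block:: python

    import asyncio

    from azure.multiapi.storagev2.fileshare.v2020_02_10.aio import ShareClient


    async def provision_share(conn_str: str) -> None:
        share = ShareClient.from_connection_string(conn_str, share_name="myshare")

        # Quota is expressed in gigabytes; metadata is an arbitrary str/str dict.
        await share.create_share(metadata={"category": "test"}, quota=1)

        # A read-only, point-in-time copy of the share.
        snapshot_props = await share.create_snapshot()
        print(snapshot_props)

        directory = await share.create_directory("mydir")
        print(directory.url)

        # Removes the share together with the snapshot taken above.
        await share.delete_share(delete_snapshots=True)


    asyncio.run(provision_share("<connection-string>"))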
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import process_storage_error -from .._shared.policies_async import ExponentialRetry -from .._generated.aio import AzureFileStorage -from .._generated.models import StorageErrorException, StorageServiceProperties -from .._generated.version import VERSION -from .._share_service_client import ShareServiceClient as ShareServiceClientBase -from .._serialize import get_api_version -from ._share_client_async import ShareClient -from ._models import SharePropertiesPaged -from .._models import service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import ResourceTypes, AccountSasPermissions - from .._models import ( - ShareProperties, - Metrics, - CorsRule, - ShareProtocolSettings, - ) - - -class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. - For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication_async.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareServiceClient, self).__init__( - account_url, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(version=VERSION, url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs, VERSION) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 12 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - protocol=None, # type: Optional[ShareProtocolSettings], - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :param protocol_settings: - Sets protocol settings - :type protocol: ~azure.storage.fileshare.ShareProtocolSettings - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. 
- """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - protocol=protocol - ) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except StorageErrorException as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs # type: Any - ): # type: (...) -> AsyncItemPaged - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 16 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace_async - async def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 12 - :caption: Create a share in the file share service. 
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace_async - async def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 16 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace_async - async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - """ - share = self.get_share_client(deleted_share_name) - try: - await share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except StorageErrorException as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. 
- """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/__init__.py deleted file mode 100644 index af67e01..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/__init__.py +++ /dev/null @@ -1,82 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._version import VERSION -from ._file_client import ShareFileClient -from ._directory_client import ShareDirectoryClient -from ._share_client import ShareClient -from ._share_service_client import ShareServiceClient -from ._lease import ShareLeaseClient -from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import ( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode) -from ._models import ( - ShareProperties, - DirectoryProperties, - Handle, - FileProperties, - Metrics, - RetentionPolicy, - CorsRule, - ShareSmbSettings, - SmbMultichannel, - ShareProtocolSettings, - ShareProtocols, - AccessPolicy, - FileSasPermissions, - ShareSasPermissions, - ContentSettings, - NTFSAttributes) -from ._generated.models import ( - HandleItem, - ShareAccessTier -) -from ._generated.models import ( - ShareRootSquash -) - -__version__ = VERSION - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageErrorCode', - 'Metrics', - 'RetentionPolicy', - 'CorsRule', - 'ShareSmbSettings', - 'ShareAccessTier', - 'SmbMultichannel', - 'ShareProtocolSettings', - 'AccessPolicy', - 'FileSasPermissions', - 'ShareSasPermissions', - 'ShareProtocols', - 'ShareProperties', - 'DirectoryProperties', - 'FileProperties', - 'ContentSettings', - 'Handle', - 'NTFSAttributes', - 'HandleItem', - 'ShareRootSquash', - 'generate_account_sas', - 'generate_share_sas', - 'generate_file_sas' -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_deserialize.py deleted file mode 100644 index 6839469..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_deserialize.py +++ /dev/null @@ -1,83 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import ShareProperties, DirectoryProperties, FileProperties -from ._shared.response_handlers import deserialize_metadata -from ._generated.models import ShareFileRangeList - - -def deserialize_share_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - share_properties = ShareProperties( - metadata=metadata, - **headers - ) - return share_properties - - -def deserialize_directory_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - directory_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return directory_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_file_stream(response, obj, headers): - file_properties = deserialize_file_properties(response, obj, headers) - obj.properties = file_properties - return response.http_response.location_mode, obj - - -def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission - ''' - - return obj.permission - - -def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission key - ''' - - if response is None or headers is None: - return None - return headers.get('x-ms-file-permission-key', None) - - -def get_file_ranges_result(ranges): - # type: (ShareFileRangeList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - file_ranges = [] # type: ignore - clear_ranges = [] # type: List - if ranges.ranges: - file_ranges = [ - {'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] # type: ignore - if ranges.clear_ranges: - clear_ranges = [ - {'start': clear_range.start, 'end': clear_range.end} for clear_range in ranges.clear_ranges] - return file_ranges, clear_ranges diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_directory_client.py deleted file mode 100644 index 557c94f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_directory_client.py +++ /dev/null @@ -1,714 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._deserialize import deserialize_directory_properties -from ._serialize import get_api_version -from ._file_client import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, DirectoryProperties, ContentSettings - from ._generated.models import HandleItem - - -class ShareDirectoryClient(StorageAccountHostsMixin): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.directory_path = directory_path - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - @classmethod - def from_directory_url(cls, directory_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> ShareDirectoryClient - """Create a ShareDirectoryClient from a directory url. - - :param str directory_url: - The full URI to the directory. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - try: - if not directory_url.lower().startswith('http'): - directory_url = "https://" + directory_url - except AttributeError: - raise ValueError("Directory URL must be a string.") - parsed_url = urlparse(directory_url.rstrip('/')) - if not parsed_url.path and not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(directory_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - path_snapshot, _ = parse_query(parsed_url.query) - - share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') - share_name = unquote(share_name) - - directory_path = path_dir - snapshot = snapshot or path_snapshot - - return cls( - account_url=account_url, share_name=share_name, directory_path=directory_path, - credential=credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. 
- """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - directory_path = "" - if self.directory_path: - directory_path = "/" + quote(self.directory_path, safe='~') - return "{}://{}/{}{}{}".format( - self.scheme, - hostname, - quote(share_name), - directory_path, - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - directory_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareDirectoryClient - """Create ShareDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str directory_path: - The directory path. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs) - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, napshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 12 - :caption: Gets the subdirectory client. 
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode, **kwargs) - - @distributed_trace - def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 12 - :caption: Creates a directory. - """ - timeout = kwargs.pop('timeout', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 12 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - self._client.directory.delete(timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], **Any) -> ItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 12 - :caption: List directories and files. 
- """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> ItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace - def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace - def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. 
- :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_subdirectory( - self, directory_name, # type: str - **kwargs): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 12 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace - def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 12 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace - def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. 
- :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 12 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace - def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 12 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_download.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_download.py deleted file mode 100644 index 8f47bee..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_download.py +++ /dev/null @@ -1,546 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
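# Hedged sketch of the directory-level upload_file/delete_file helpers
# documented above; they delegate to a ShareFileClient for the named file.
# The local path, share name and file name are illustrative.
from azure.storage.fileshare import ShareDirectoryClient

directory = ShareDirectoryClient.from_connection_string(
    "<connection-string>", share_name="myshare", directory_path="data")

with open("local-report.csv", "rb") as source:
    file_client = directory.upload_file("report.csv", source)
print(file_client.file_name)  # "report.csv"

directory.delete_file("report.csv")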
-# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO -from typing import Iterator - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - **kwargs - ): - self.client = client - - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def 
process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], download_range[1], check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += self._iter_downloader.yield_chunk(chunk) - except StopIteration as e: - self._complete = True - if self._current_content: - return self._current_content - raise e - - return self._get_chunk_data() - - next = __next__ # Python 2 compatibility. - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar: str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. 
- :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size == self.size: - self._download_complete = True - return response - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. 
- :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options - ) - if parallel: - import concurrent.futures - with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor: - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_file_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_file_client.py deleted file mode 100644 index 6ac6dc0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_file_client.py +++ /dev/null @@ -1,1408 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
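# Hedged sketch of consuming the StorageStreamDownloader defined above via
# ShareFileClient.download_file(); readall() and chunks() behave as described
# in this module. The connection string and paths are placeholders.
from io import BytesIO

from azure.storage.fileshare import ShareFileClient

file_client = ShareFileClient.from_connection_string(
    "<connection-string>", share_name="myshare", file_path="data/report.csv")

downloader = file_client.download_file()  # StorageStreamDownloader
data = downloader.readall()               # entire file as bytes

# A ranged download streamed chunk by chunk into an in-memory buffer.
buffer = BytesIO()
for chunk in file_client.download_file(offset=0, length=1024).chunks():
    buffer.write(chunk)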
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, too-many-public-methods -import functools -import time -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Optional, Union, IO, List, Dict, Any, Iterable, Tuple, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.models import FileHTTPHeaders -from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, get_length -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._lease import ShareLeaseClient -from ._serialize import get_source_conditions, get_access_conditions, get_smb_properties, get_api_version -from ._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result -from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import -from ._download import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, ContentSettings, FileProperties, Handle - from ._generated.models import HandleItem - - -def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = client.create_file( - size, - content_settings=content_settings, - metadata=metadata, - timeout=timeout, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - **kwargs - ) - if size == 0: - return response - - responses = upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except HttpResponseError as error: - process_storage_error(error) - - -class ShareFileClient(StorageAccountHostsMixin): - """A client to interact with a specific file, although that file may not yet exist. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. 
If specified, this value will override - a file value specified in the file URL. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not (share_name and file_path): - raise ValueError("Please specify a share name and file name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.file_path = file_path.split('/') - self.file_name = self.file_path[-1] - self.directory_path = "/".join(self.file_path[:-1]) - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - @classmethod - def from_file_url( - cls, file_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """A client to interact with a specific file, although that file may not yet exist. - - :param str file_url: The full URI to the file. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. 
The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - try: - if not file_url.lower().startswith('http'): - file_url = "https://" + file_url - except AttributeError: - raise ValueError("File URL must be a string.") - parsed_url = urlparse(file_url.rstrip('/')) - - if not (parsed_url.netloc and parsed_url.path): - raise ValueError("Invalid URL: {}".format(file_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - - path_share, _, path_file = parsed_url.path.lstrip('/').partition('/') - path_snapshot, _ = parse_query(parsed_url.query) - snapshot = snapshot or path_snapshot - share_name = unquote(path_share) - file_path = '/'.join([unquote(p) for p in path_file.split('/')]) - return cls(account_url, share_name, file_path, snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - "/".join([quote(p, safe='~') for p in self.file_path]), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Create ShareFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str file_path: - The file path. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START create_file_client] - :end-before: [END create_file_client] - :language: python - :dedent: 12 - :caption: Creates the file client with connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs) - - @distributed_trace - def acquire_lease(self, lease_id=None, **kwargs): - # type: (Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the file does not have an active lease, the File - Service creates a lease on the blob and returns a new lease. - - :param str lease_id: - Proposed lease ID, in a GUID string format. The File Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. 
- :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START acquire_and_release_lease_on_file] - :end-before: [END acquire_and_release_lease_on_file] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a file. - """ - kwargs['lease_duration'] = -1 - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_file( # type: ignore - self, size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 12 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. 
- :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 12 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs) - - @distributed_trace - def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. 
- Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 12 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - try: - self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def download_file( - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the file into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the file has an active lease. 
Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.fileshare.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 12 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - return StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs) - - @distributed_trace - def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 12 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = '/'.join(self.file_path) - return file_props # type: ignore - - @distributed_trace - def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop('size', None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_file_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.file.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - metadata=metadata, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_range( # type: ignore - self, data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. 
Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _upload_range_from_url_options(source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - - if offset is None: - raise ValueError("offset must be provided.") - if length is None: - raise ValueError("length must be provided.") - if source_offset is None: - raise ValueError("source_offset must be provided.") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) - - source_mod_conditions = get_source_conditions(kwargs) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'copy_source': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_modified_access_conditions': source_mod_conditions, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.file.upload_range_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _get_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_sharesnapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) -> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - content_range = None - if offset is not None: - if length is not None: - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - else: - content_range = 'bytes={0}-'.format(offset) - options = { - 'sharesnapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range} - if previous_sharesnapshot: - try: - options['prevsharesnapshot'] = previous_sharesnapshot.snapshot # type: ignore - except AttributeError: - try: - options['prevsharesnapshot'] = previous_sharesnapshot['snapshot'] # type: ignore - except TypeError: - options['prevsharesnapshot'] = previous_sharesnapshot - options.update(kwargs) - return options - - @distributed_trace - def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> List[Dict[str, int]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A list of valid ranges. - :rtype: List[dict[str, int]] - """ - options = self._get_ranges_options( - offset=offset, - length=length, - **kwargs) - try: - ranges = self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] - - def get_ranges_diff( # type: ignore - self, - previous_sharesnapshot, # type: Union[str, Dict[str, Any]] - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - .. versionadded:: 12.6.0 - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :param str previous_sharesnapshot: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous file snapshot to be compared - against a more recent snapshot or the current file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. - The first element are filled file ranges, the 2nd element is cleared file ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_ranges_options( - offset=offset, - length=length, - previous_sharesnapshot=previous_sharesnapshot, - **kwargs) - try: - ranges = self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_file_ranges_result(ranges) - - @distributed_trace - def clear_range( # type: ignore - self, offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - try: - return self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - optionalbody=None, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> ItemPaged[Handle] - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. 
Use 'close_all_handles' instead.") - try: - response = self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/__init__.py deleted file mode 100644 index 34ce526..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/_azure_file_storage.py deleted file mode 100644 index 3717742..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/_azure_file_storage.py +++ /dev/null @@ -1,76 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -from ._configuration import AzureFileStorageConfiguration -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from . import models - - -class AzureFileStorage(object): - """AzureFileStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.fileshare.operations.ServiceOperations - :ivar share: ShareOperations operations - :vartype share: azure.storage.fileshare.operations.ShareOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.fileshare.operations.DirectoryOperations - :ivar file: FileOperations operations - :vartype file: azure.storage.fileshare.operations.FileOperations - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - base_url = '{url}' - self._config = AzureFileStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureFileStorage - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/_configuration.py deleted file mode 100644 index 0fc4ad4..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/_configuration.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage. 
- - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-04-08" - self.file_range_write_from_url = "update" - kwargs.setdefault('sdk_moniker', 'azurefilestorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) -> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/__init__.py deleted file mode 100644 index f306ba0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/_azure_file_storage.py deleted file mode 100644 index f3f1d7c..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/_azure_file_storage.py +++ /dev/null @@ -1,68 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from msrest import Deserializer, Serializer - -from ._configuration import AzureFileStorageConfiguration -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from .. import models - - -class AzureFileStorage(object): - """AzureFileStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.fileshare.aio.operations.ServiceOperations - :ivar share: ShareOperations operations - :vartype share: azure.storage.fileshare.aio.operations.ShareOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.fileshare.aio.operations.DirectoryOperations - :ivar file: FileOperations operations - :vartype file: azure.storage.fileshare.aio.operations.FileOperations - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureFileStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureFileStorage": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/_configuration.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/_configuration.py deleted file mode 100644 index af7c665..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/_configuration.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. 
- :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-04-08" - self.file_range_write_from_url = "update" - kwargs.setdefault('sdk_moniker', 'azurefilestorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/__init__.py deleted file mode 100644 index ba8fb22..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_directory_operations.py deleted file mode 100644 index 02221cb..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_directory_operations.py +++ /dev/null @@ -1,739 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - **kwargs - ) -> None: - """Creates a new directory under the specified share or parent directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. 
- :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def get_properties( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs - ) -> None: - """Returns all system properties for the specified directory, and can also be used to check the - existence of a directory. The data returned does not include the files in the directory or any - subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - **kwargs - ) -> None: - """Removes the specified empty directory. Note that the directory must be empty before it can be - deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', 
response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def set_properties( - self, - timeout: Optional[int] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - **kwargs - ) -> None: - """Sets properties on the directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. 
- :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - **kwargs - ) -> None: - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return 
cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def list_files_and_directories_segment( - self, - prefix: Optional[str] = None, - sharesnapshot: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - **kwargs - ) -> "_models.ListFilesAndDirectoriesSegmentResponse": - """Returns a list of files or directories under the specified share or directory. It lists the - contents only for a single level of the directory hierarchy. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListFilesAndDirectoriesSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListFilesAndDirectoriesSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", 
self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def list_handles( - self, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - recursive: Optional[bool] = None, - **kwargs - ) -> "_models.ListHandlesResponse": - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. 
- :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def force_close_handles( - self, - handle_id: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - sharesnapshot: Optional[str] = None, - recursive: Optional[bool] = None, - **kwargs - ) -> None: - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. - :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - 
response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_file_operations.py deleted file mode 100644 index 68bd184..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_file_operations.py +++ /dev/null @@ -1,1770 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class FileOperations: - """FileOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - file_content_length: int, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - file_http_headers: Optional["_models.FileHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Creates a new file or replaces a file. Note it only initializes the file with no content. - - :param file_content_length: Specifies the maximum size for the file, up to 4 TB. - :type file_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. 
This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - file_type_constant = "file" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("file_type_constant", file_type_constant, 'str') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = 
self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - 
response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def download( - self, - timeout: Optional[int] = None, - range: Optional[str] = None, - range_get_content_md5: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> IO: - """Reads or downloads a file from the system, including its metadata and properties. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and specified together with the - Range header, the service returns the MD5 hash for the range, as long as the range is less than - or equal to 4 MB in size. - :type range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = 
response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def get_properties( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Returns all user-defined metadata, standard HTTP properties, and system properties for the - file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-type']=self._deserialize('str', response.headers.get('x-ms-type')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', 
response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def set_http_headers( - self, - timeout: Optional[int] = None, - file_content_length: Optional[int] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - file_http_headers: Optional["_models.FileHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Sets HTTP headers on the file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If the specified byte value - is less than the current size of the file, then all ranges above the specified byte value are - cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. 
Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if 
_file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': 
'/{shareName}/{directory}/{fileName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - 
duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore 
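# Illustrative sketch, not part of the deleted generated module: the acquire/release/
# change/break lease operations removed above are surfaced by the public
# azure-storage-file-share aio client as ShareFileClient.acquire_lease() and the
# ShareLeaseClient it returns. The share name, file path, and metadata value below
# are placeholder assumptions; the connection string is read from the environment.
import asyncio
import os

from azure.storage.fileshare.aio import ShareFileClient


async def lease_roundtrip() -> None:
    file_client = ShareFileClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"],
        share_name="myshare",
        file_path="dir/report.txt",
    )
    async with file_client:
        # PUT <file>?comp=lease with x-ms-lease-action: acquire
        lease = await file_client.acquire_lease()
        try:
            # Pass the lease so the write carries x-ms-lease-id and satisfies the
            # lease access condition enforced while the file is leased.
            await file_client.set_file_metadata({"reviewed": "true"}, lease=lease)
        finally:
            # PUT <file>?comp=lease with x-ms-lease-action: release
            await lease.release()


if __name__ == "__main__":
    asyncio.run(lease_roundtrip())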
- - async def upload_range( - self, - range: str, - content_length: int, - optionalbody: IO, - timeout: Optional[int] = None, - file_range_write: Union[str, "_models.FileRangeWriteType"] = "update", - content_md5: Optional[bytearray] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the start and end of the range - must be specified. For an update operation, the range can be up to 4 MB in size. For a clear - operation, the range can be up to the value of the file's full size. The File service accepts - only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be - specified in the following format: bytes=startByte-endByte. - :type range: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param optionalbody: Initial data. - :type optionalbody: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param file_range_write: Specify one of the following options: - Update: Writes the bytes - specified by the request body into the specified range. The Range and Content-Length headers - must match to perform the update. - Clear: Clears the specified range and releases the space - used in storage for that range. To clear a range, set the Content-Length header to zero, and - set the Range header to a value that indicates the range to clear, up to maximum file size. - :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType - :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of - the data during transport. When the Content-MD5 header is specified, the File service compares - the hash of the content that has arrived with the header value that was sent. If the two hashes - do not match, the operation will fail with error code 400 (Bad Request). - :type content_md5: bytearray - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "range" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_range.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = optionalbody - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) 
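# Illustrative sketch, not part of the deleted generated module: the Upload Range
# operation above (PUT <file>?comp=range) is exposed by the public
# azure-storage-file-share aio client as ShareFileClient.upload_range(), and the
# Get Range List operation further below as get_ranges(). The file is assumed to
# already exist and to be at least 1 KiB long; share and file names are placeholders.
from azure.storage.fileshare.aio import ShareFileClient


async def write_first_kib(conn_str: str) -> None:
    file_client = ShareFileClient.from_connection_string(
        conn_str,
        share_name="myshare",
        file_path="dir/report.txt",
    )
    async with file_client:
        data = b"\x00" * 1024
        # x-ms-write: update for the byte range [0, 1023]
        await file_client.upload_range(data, offset=0, length=len(data))
        # GET <file>?comp=rangelist over the first 4 KiB
        ranges = await file_client.get_ranges(offset=0, length=4096)
        print([(r["start"], r["end"]) for r in ranges])

# Invoke with e.g. asyncio.run(write_first_kib("<connection string>")).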
- - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def upload_range_from_url( - self, - range: str, - copy_source: str, - content_length: int, - timeout: Optional[int] = None, - source_range: Optional[str] = None, - source_content_crc64: Optional[bytearray] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Upload a range of bytes to a file where the contents are read from a URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_content_crc64: bytearray - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_match_crc64 = None - _source_if_none_match_crc64 = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - comp = "range" - accept = "application/xml" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - if _source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", _source_if_match_crc64, 'bytearray') - if _source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", _source_if_none_match_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def get_range_list( - self, - sharesnapshot: Optional[str] = None, - prevsharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> "_models.ShareFileRangeList": - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, - when present, specifies the previous snapshot. - :type prevsharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, inclusively. - :type range: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareFileRangeList, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareFileRangeList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareFileRangeList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "rangelist" - accept = "application/xml" - - # Construct URL - url = self.get_range_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if prevsharesnapshot is not None: - query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-content-length']=self._deserialize('long', response.headers.get('x-ms-content-length')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareFileRangeList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def start_copy( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: Optional[str] = "inherit", - 
file_permission_key: Optional[str] = None, - copy_file_smb_info: Optional["_models.CopyFileSmbInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param copy_file_smb_info: Parameter group. - :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_permission_copy_mode = None - _ignore_read_only = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - _set_archive_attribute = None - _lease_id = None - if copy_file_smb_info is not None: - _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - _ignore_read_only = copy_file_smb_info.ignore_read_only - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - _set_archive_attribute = copy_file_smb_info.set_archive_attribute - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.start_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if _file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", _file_permission_copy_mode, 'str') - if _ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", _ignore_read_only, 'bool') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if _set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", _set_archive_attribute, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def abort_copy( - self, - copy_id: str, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Aborts a pending Copy File operation, and leaves a destination file with zero length and full - metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def list_handles( - self, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - **kwargs - ) -> "_models.ListHandlesResponse": - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def force_close_handles( - self, - handle_id: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - sharesnapshot: Optional[str] = None, - **kwargs - ) -> None: - """Closes all handles open 
for given file. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - 
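# Illustrative sketch only: assuming the upstream ShareFileClient layer, the
# list_handles and force_close_handles operations in this file map to list_handles()
# and close_handle(); all names below are placeholders.
from azure.storage.fileshare import ShareFileClient

file_client = ShareFileClient.from_connection_string(
    "<connection-string>", share_name="myshare", file_path="dir/target.txt")
for handle in file_client.list_handles():
    # Sends x-ms-handle-id for each open handle; '*' closes all handles at once.
    file_client.close_handle(handle)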
response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_service_operations.py deleted file mode 100644 index 7d5bf30..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,269 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def set_properties( - self, - storage_service_properties: "_models.StorageServiceProperties", - timeout: Optional[int] = None, - **kwargs - ) -> None: - """Sets properties for a storage account's File service endpoint, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - **kwargs - ) -> "_models.StorageServiceProperties": - """Gets the properties of a storage account's File service, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - async def list_shares_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListSharesIncludeType"]]] = None, - timeout: Optional[int] = None, - **kwargs - ) -> "_models.ListSharesResponse": - """The List Shares Segment operation returns a list of the shares and share snapshots under the - specified account. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. 
- :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListSharesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListSharesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_shares_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListSharesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_shares_segment.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_share_operations.py deleted file mode 100644 index 7dc9b8f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/aio/operations/_share_operations.py +++ /dev/null @@ -1,1485 +0,0 @@ -# coding=utf-8 -# 
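# Illustrative sketch only: the three ServiceOperations calls deleted above
# (set_properties, get_properties, list_shares_segment) are normally driven through
# the upstream ShareServiceClient; the metrics settings and connection string below
# are placeholders, not values taken from this package.
from azure.storage.fileshare import Metrics, RetentionPolicy, ShareServiceClient

service = ShareServiceClient.from_connection_string("<connection-string>")
service.set_service_properties(
    hour_metrics=Metrics(enabled=True, include_apis=True,
                         retention_policy=RetentionPolicy(enabled=True, days=5)))
props = service.get_service_properties()                    # restype=service&comp=properties
for share in service.list_shares(include_snapshots=True):   # comp=list
    print(share.name)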
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ShareOperations: - """ShareOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - quota: Optional[int] = None, - access_tier: Optional[Union[str, "_models.ShareAccessTier"]] = None, - enabled_protocols: Optional[str] = None, - root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, - **kwargs - ) -> None: - """Creates a new share under the specified account. If the share with the same name already - exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param enabled_protocols: Protocols to enable on the share. - :type enabled_protocols: str - :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
- :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if enabled_protocols is not None: - header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_properties( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Returns all user-defined metadata and system properties for the specified share or share - snapshot. The data returned does not include the share's list of files. 
- - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota')) - response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')) - response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')) - 
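# Illustrative sketch only: assuming the upstream ShareClient layer, create_share,
# get_share_properties and delete_share wrap the create, get_properties and delete
# operations of this class; placeholders throughout.
from azure.storage.fileshare import ShareClient

share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
share.create_share(quota=10)            # x-ms-share-quota; 201 Created on success
props = share.get_share_properties()
print(props.quota, props.access_tier)   # surfaced from the response headers parsed above
share.delete_share()                    # 202 Accepted; data is removed during garbage collection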
response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')) - response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')) - response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols')) - response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}'} # type: ignore - - async def delete( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Operation marks the specified share or share snapshot for deletion. The share or share snapshot - and any files contained within it are later deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the base share and all of its - snapshots. - :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. 
A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) 
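# Illustrative sketch only: share leases are normally taken through
# ShareClient.acquire_lease (upstream layer), which wraps this acquire operation and
# returns a ShareLeaseClient; values below are placeholders.
from azure.storage.fileshare import ShareClient

share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
lease = share.acquire_lease(lease_duration=60)   # 15-60 seconds, or -1 for an infinite lease
# ... operations guarded by the lease ...
lease.release()                                  # maps to the release_lease operation below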
- response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs - ) -> None: - """The 
Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "renew" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
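# Illustrative sketch only: assuming the upstream ShareLeaseClient returned by
# acquire_lease also exposes the remaining lease verbs used in this class (renew,
# change, break); the GUID and connection string are placeholders.
from azure.storage.fileshare import ShareClient

share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
lease = share.acquire_lease(lease_duration=60)
lease.renew()                                          # x-ms-lease-action: renew
lease.change("00000000-0000-0000-0000-000000000001")   # x-ms-lease-action: change
lease.break_lease()                                    # x-ms-lease-action: break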
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - sharesnapshot: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, 
response_headers) - - break_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def create_snapshot( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - **kwargs - ) -> None: - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{shareName}'} # type: ignore - - async def create_permission( - self, - share_permission: "_models.SharePermission", - timeout: Optional[int] = None, - **kwargs - ) -> None: - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the share level. 
- :type share_permission: ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/xml" - - # Construct URL - url = self.create_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(share_permission, 'SharePermission') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_permission.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_permission( - self, - file_permission_key: str, - timeout: Optional[int] = None, - **kwargs - ) -> "_models.SharePermission": - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SharePermission, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - accept = "application/json" - - # Construct URL - url = self.get_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('SharePermission', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_properties( - self, - timeout: Optional[int] = None, - quota: Optional[int] = None, - access_tier: Optional[Union[str, "_models.ShareAccessTier"]] = None, - root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Sets properties for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
- :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Sets one or more user-defined name-value pairs for the 
specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_access_policy( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> List["_models.SignedIdentifier"]: - """Returns information about stored access policies specified on the share. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_access_policy( - self, - timeout: Optional[int] = None, - share_acl: Optional[List["_models.SignedIdentifier"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> None: - """Sets a stored access policy for use with shared access signatures. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param share_acl: The ACL for the share. - :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return 
cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_statistics( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs - ) -> "_models.ShareStats": - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareStats, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} # type: ignore - - async def restore( - self, - timeout: 
Optional[int] = None, - request_id_parameter: Optional[str] = None, - deleted_share_name: Optional[str] = None, - deleted_share_version: Optional[str] = None, - **kwargs - ) -> None: - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_share_name: Specifies the name of the previously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the previously-deleted share. - :type deleted_share_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{shareName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/__init__.py deleted file mode 100644 index 8a030e5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/__init__.py +++ /dev/null @@ -1,125 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import ClearRange - from ._models_py3 import CopyFileSmbInfo - from ._models_py3 import CorsRule - from ._models_py3 import DirectoryItem - from ._models_py3 import FileHTTPHeaders - from ._models_py3 import FileItem - from ._models_py3 import FileProperty - from ._models_py3 import FileRange - from ._models_py3 import FilesAndDirectoriesListSegment - from ._models_py3 import HandleItem - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListFilesAndDirectoriesSegmentResponse - from ._models_py3 import ListHandlesResponse - from ._models_py3 import ListSharesResponse - from ._models_py3 import Metrics - from ._models_py3 import RetentionPolicy - from ._models_py3 import ShareFileRangeList - from ._models_py3 import ShareItemInternal - from ._models_py3 import SharePermission - from ._models_py3 import SharePropertiesInternal - from ._models_py3 import ShareProtocolSettings - from ._models_py3 import ShareSmbSettings - from ._models_py3 import ShareStats - from ._models_py3 import SignedIdentifier - from ._models_py3 import SmbMultichannel - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError - from ._models_py3 import StorageServiceProperties -except (SyntaxError, ImportError): - from ._models import AccessPolicy # type: ignore - from ._models import ClearRange # type: ignore - from ._models import CopyFileSmbInfo # type: ignore - from ._models import CorsRule # type: ignore - from ._models import DirectoryItem # type: ignore - from ._models import FileHTTPHeaders # type: ignore - from ._models import FileItem # type: ignore - from ._models import FileProperty # type: ignore - from ._models import FileRange # type: ignore - from ._models import FilesAndDirectoriesListSegment # type: ignore - from ._models import HandleItem # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListFilesAndDirectoriesSegmentResponse # type: ignore - from ._models import ListHandlesResponse # type: ignore - from ._models import ListSharesResponse # type: ignore - from ._models import Metrics # type: ignore - from ._models import RetentionPolicy # type: ignore - from ._models import ShareFileRangeList # type: ignore - from ._models import ShareItemInternal # type: ignore - from ._models 
import SharePermission # type: ignore - from ._models import SharePropertiesInternal # type: ignore - from ._models import ShareProtocolSettings # type: ignore - from ._models import ShareSmbSettings # type: ignore - from ._models import ShareStats # type: ignore - from ._models import SignedIdentifier # type: ignore - from ._models import SmbMultichannel # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageServiceProperties # type: ignore - -from ._azure_file_storage_enums import ( - CopyStatusType, - DeleteSnapshotsOptionType, - FileRangeWriteType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListSharesIncludeType, - PermissionCopyModeType, - ShareAccessTier, - ShareRootSquash, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'ClearRange', - 'CopyFileSmbInfo', - 'CorsRule', - 'DirectoryItem', - 'FileHTTPHeaders', - 'FileItem', - 'FileProperty', - 'FileRange', - 'FilesAndDirectoriesListSegment', - 'HandleItem', - 'LeaseAccessConditions', - 'ListFilesAndDirectoriesSegmentResponse', - 'ListHandlesResponse', - 'ListSharesResponse', - 'Metrics', - 'RetentionPolicy', - 'ShareFileRangeList', - 'ShareItemInternal', - 'SharePermission', - 'SharePropertiesInternal', - 'ShareProtocolSettings', - 'ShareSmbSettings', - 'ShareStats', - 'SignedIdentifier', - 'SmbMultichannel', - 'SourceModifiedAccessConditions', - 'StorageError', - 'StorageServiceProperties', - 'CopyStatusType', - 'DeleteSnapshotsOptionType', - 'FileRangeWriteType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'ListSharesIncludeType', - 'PermissionCopyModeType', - 'ShareAccessTier', - 'ShareRootSquash', - 'StorageErrorCode', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_azure_file_storage_enums.py deleted file mode 100644 index 19c0e2a..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_azure_file_storage_enums.py +++ /dev/null @@ -1,162 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. 
- """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - PENDING = "pending" - SUCCESS = "success" - ABORTED = "aborted" - FAILED = "failed" - -class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INCLUDE = "include" - INCLUDE_LEASED = "include-leased" - -class FileRangeWriteType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - UPDATE = "update" - CLEAR = "clear" - -class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """When a share is leased, specifies whether the lease is of infinite or fixed duration. - """ - - INFINITE = "infinite" - FIXED = "fixed" - -class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Lease state of the share. - """ - - AVAILABLE = "available" - LEASED = "leased" - EXPIRED = "expired" - BREAKING = "breaking" - BROKEN = "broken" - -class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The current lease status of the share. - """ - - LOCKED = "locked" - UNLOCKED = "unlocked" - -class ListSharesIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SNAPSHOTS = "snapshots" - METADATA = "metadata" - DELETED = "deleted" - -class PermissionCopyModeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SOURCE = "source" - OVERRIDE = "override" - -class ShareAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - TRANSACTION_OPTIMIZED = "TransactionOptimized" - HOT = "Hot" - COOL = "Cool" - -class ShareRootSquash(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NO_ROOT_SQUASH = "NoRootSquash" - ROOT_SQUASH = "RootSquash" - ALL_SQUASH = "AllSquash" - -class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Error codes returned by the service - """ - - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = "InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = "OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = 
"ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" - CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" - DELETE_PENDING = "DeletePending" - DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" - FILE_LOCK_CONFLICT = "FileLockConflict" - INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" - PARENT_NOT_FOUND = "ParentNotFound" - READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" - SHARE_ALREADY_EXISTS = "ShareAlreadyExists" - SHARE_BEING_DELETED = "ShareBeingDeleted" - SHARE_DISABLED = "ShareDisabled" - SHARE_NOT_FOUND = "ShareNotFound" - SHARING_VIOLATION = "SharingViolation" - SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" - SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" - SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" - SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" - CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" - AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" - AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" - AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" - AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" - AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_models.py deleted file mode 100644 index 665c309..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_models.py +++ /dev/null @@ -1,1052 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class CopyFileSmbInfo(msrest.serialization.Model): - """Parameter group. - - :param file_permission_copy_mode: Specifies the option to copy file security descriptor from - source file or to set it using the value which is defined by the header value of x-ms-file- - permission or x-ms-file-permission-key. Possible values include: "source", "override". - :type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file if it already exists - and has read-only attribute set. - :type ignore_read_only: bool - :param file_attributes: Specifies either the option to copy file attributes from a source - file(source) to a target file or a list of attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file creation time from a source - file(source) to a target file or a time value in ISO 8601 format to set as creation time on a - target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last write time from a - source file(source) to a target file or a time value in ISO 8601 format to set as last write - time on a target file. - :type file_last_write_time: str - :param set_archive_attribute: Specifies the option to set archive attribute on a target file. - True means archive attribute will be set on a target file despite attribute overrides or a - source file state. - :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'}, - 'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'}, - 'file_attributes': {'key': 'fileAttributes', 'type': 'str'}, - 'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'}, - 'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'}, - 'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None) - self.ignore_read_only = kwargs.get('ignore_read_only', None) - self.file_attributes = kwargs.get('file_attributes', None) - self.file_creation_time = kwargs.get('file_creation_time', None) - self.file_last_write_time = kwargs.get('file_last_write_time', None) - self.set_archive_attribute = kwargs.get('set_archive_attribute', None) - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. 
The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user agent sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs['allowed_origins'] - self.allowed_methods = kwargs['allowed_methods'] - self.allowed_headers = kwargs['allowed_headers'] - self.exposed_headers = kwargs['exposed_headers'] - self.max_age_in_seconds = kwargs['max_age_in_seconds'] - - -class DirectoryItem(msrest.serialization.Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__( - self, - **kwargs - ): - super(DirectoryItem, self).__init__(**kwargs) - self.name = kwargs['name'] - - -class FileHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param file_content_type: Sets the MIME content type of the file. The default type is - 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service stores this value - but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition header. 
- :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': 'fileContentType', 'type': 'str'}, - 'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'}, - 'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'}, - 'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'}, - 'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'}, - 'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = kwargs.get('file_content_type', None) - self.file_content_encoding = kwargs.get('file_content_encoding', None) - self.file_content_language = kwargs.get('file_content_language', None) - self.file_cache_control = kwargs.get('file_cache_control', None) - self.file_content_md5 = kwargs.get('file_content_md5', None) - self.file_content_disposition = kwargs.get('file_content_disposition', None) - - -class FileItem(msrest.serialization.Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. File properties. - :type properties: ~azure.storage.fileshare.models.FileProperty - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - } - _xml_map = { - 'name': 'File' - } - - def __init__( - self, - **kwargs - ): - super(FileItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.properties = kwargs['properties'] - - -class FileProperty(msrest.serialization.Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value may not be up-to-date - since an SMB client may have modified the file locally. The value of Content-Length may not - reflect that fact until the handle is closed or the op-lock is broken. To retrieve current - property values, call Get File Properties. - :type content_length: long - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(FileProperty, self).__init__(**kwargs) - self.content_length = kwargs['content_length'] - - -class FileRange(msrest.serialization.Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long'}, - 'end': {'key': 'End', 'type': 'long'}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__( - self, - **kwargs - ): - super(FileRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class FilesAndDirectoriesListSegment(msrest.serialization.Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. 
- :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. - :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]'}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__( - self, - **kwargs - ): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = kwargs['directory_items'] - self.file_items = kwargs['file_items'] - - -class HandleItem(msrest.serialization.Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID. - :type handle_id: str - :param path: Required. File or directory name including full path starting from share root. - :type path: str - :param file_id: Required. FileId uniquely identifies the file or directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file handle was opened. - :type session_id: str - :param client_ip: Required. Client IP that opened the handle. - :type client_ip: str - :param open_time: Required. Time when the session that previously opened the handle has last - been reconnected. (UTC). - :type open_time: ~datetime.datetime - :param last_reconnect_time: Time handle was last connected to (UTC). - :type last_reconnect_time: ~datetime.datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str'}, - 'path': {'key': 'Path', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'parent_id': {'key': 'ParentId', 'type': 'str'}, - 'session_id': {'key': 'SessionId', 'type': 'str'}, - 'client_ip': {'key': 'ClientIp', 'type': 'str'}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__( - self, - **kwargs - ): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = kwargs['handle_id'] - self.path = kwargs['path'] - self.file_id = kwargs['file_id'] - self.parent_id = kwargs.get('parent_id', None) - self.session_id = kwargs['session_id'] - self.client_ip = kwargs['client_ip'] - self.open_time = kwargs['open_time'] - self.last_reconnect_time = kwargs.get('last_reconnect_time', None) - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. 
- - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. Abstract for entries that can be listed from Directory. - :type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.share_name = kwargs['share_name'] - self.share_snapshot = kwargs.get('share_snapshot', None) - self.directory_path = kwargs['directory_path'] - self.prefix = kwargs['prefix'] - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs['next_marker'] - - -class ListHandlesResponse(msrest.serialization.Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = kwargs.get('handle_list', None) - self.next_marker = kwargs['next_marker'] - - -class ListSharesResponse(msrest.serialization.Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItemInternal] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.share_items = kwargs.get('share_items', None) - self.next_marker = kwargs['next_marker'] - - -class Metrics(msrest.serialization.Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: The retention policy. - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs['version'] - self.enabled = kwargs['enabled'] - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class RetentionPolicy(msrest.serialization.Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the File service. - If false, metrics data is retained, and the user is responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be retained. All data older - than this value will be deleted. Metrics data is deleted on a best-effort basis after the - retention period expires. - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.days = kwargs.get('days', None) - - -class ShareFileRangeList(msrest.serialization.Model): - """The list of file ranges. 
- - :param ranges: - :type ranges: list[~azure.storage.fileshare.models.FileRange] - :param clear_ranges: - :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] - """ - - _attribute_map = { - 'ranges': {'key': 'Ranges', 'type': '[FileRange]'}, - 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'}, - } - - def __init__( - self, - **kwargs - ): - super(ShareFileRangeList, self).__init__(**kwargs) - self.ranges = kwargs.get('ranges', None) - self.clear_ranges = kwargs.get('clear_ranges', None) - - -class ShareItemInternal(msrest.serialization.Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a share. - :type properties: ~azure.storage.fileshare.models.SharePropertiesInternal - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__( - self, - **kwargs - ): - super(ShareItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.snapshot = kwargs.get('snapshot', None) - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - - -class SharePermission(msrest.serialization.Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor Definition Language - (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SharePermission, self).__init__(**kwargs) - self.permission = kwargs['permission'] - - -class SharePropertiesInternal(msrest.serialization.Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param quota: Required. - :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_m_bps: - :type provisioned_ingress_m_bps: int - :param provisioned_egress_m_bps: - :type provisioned_egress_m_bps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: ~datetime.datetime - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: - :type access_tier: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param access_tier_transition_state: - :type access_tier_transition_state: str - :param lease_status: The current lease status of the share. 
Possible values include: "locked", - "unlocked". - :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType - :param lease_state: Lease state of the share. Possible values include: "available", "leased", - "expired", "breaking", "broken". - :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType - :param lease_duration: When a share is leased, specifies whether the lease is of infinite or - fixed duration. Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType - :param enabled_protocols: - :type enabled_protocols: str - :param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash". - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'quota': {'key': 'Quota', 'type': 'int'}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'}, - 'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'}, - 'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'}, - 'root_squash': {'key': 'RootSquash', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SharePropertiesInternal, self).__init__(**kwargs) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.quota = kwargs['quota'] - self.provisioned_iops = kwargs.get('provisioned_iops', None) - self.provisioned_ingress_m_bps = kwargs.get('provisioned_ingress_m_bps', None) - self.provisioned_egress_m_bps = kwargs.get('provisioned_egress_m_bps', None) - self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.access_tier_transition_state = kwargs.get('access_tier_transition_state', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.enabled_protocols = kwargs.get('enabled_protocols', None) - self.root_squash = kwargs.get('root_squash', None) - - -class ShareProtocolSettings(msrest.serialization.Model): - """Protocol settings. - - :param smb: Settings for SMB protocol. 
- :type smb: ~azure.storage.fileshare.models.ShareSmbSettings - """ - - _attribute_map = { - 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}}, - } - - def __init__( - self, - **kwargs - ): - super(ShareProtocolSettings, self).__init__(**kwargs) - self.smb = kwargs.get('smb', None) - - -class ShareSmbSettings(msrest.serialization.Model): - """Settings for SMB protocol. - - :param multichannel: Settings for SMB Multichannel. - :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel - """ - - _attribute_map = { - 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'}, - } - - def __init__( - self, - **kwargs - ): - super(ShareSmbSettings, self).__init__(**kwargs) - self.multichannel = kwargs.get('multichannel', None) - - -class ShareStats(msrest.serialization.Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that - this value may not include all recently created or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = kwargs['share_usage_bytes'] - - -class SignedIdentifier(msrest.serialization.Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs['id'] - self.access_policy = kwargs.get('access_policy', None) - - -class SmbMultichannel(msrest.serialization.Model): - """Settings for SMB multichannel. - - :param enabled: If SMB multichannel is enabled. - :type enabled: bool - """ - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Multichannel' - } - - def __init__( - self, - **kwargs - ): - super(SmbMultichannel, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching - crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a - matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'}, - 'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None) - self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. 
- - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for - files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in minute aggregates for - files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.fileshare.models.CorsRule] - :param protocol: Protocol settings. - :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.protocol = kwargs.get('protocol', None) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_models_py3.py deleted file mode 100644 index 0a52ef9..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/models/_models_py3.py +++ /dev/null @@ -1,1187 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Dict, List, Optional, Union - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - -from ._azure_file_storage_enums import * - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - *, - start: Optional[str] = None, - expiry: Optional[str] = None, - permission: Optional[str] = None, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class CopyFileSmbInfo(msrest.serialization.Model): - """Parameter group. - - :param file_permission_copy_mode: Specifies the option to copy file security descriptor from - source file or to set it using the value which is defined by the header value of x-ms-file- - permission or x-ms-file-permission-key. Possible values include: "source", "override". - :type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file if it already exists - and has read-only attribute set. - :type ignore_read_only: bool - :param file_attributes: Specifies either the option to copy file attributes from a source - file(source) to a target file or a list of attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file creation time from a source - file(source) to a target file or a time value in ISO 8601 format to set as creation time on a - target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last write time from a - source file(source) to a target file or a time value in ISO 8601 format to set as last write - time on a target file. - :type file_last_write_time: str - :param set_archive_attribute: Specifies the option to set archive attribute on a target file. - True means archive attribute will be set on a target file despite attribute overrides or a - source file state. - :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'}, - 'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'}, - 'file_attributes': {'key': 'fileAttributes', 'type': 'str'}, - 'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'}, - 'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'}, - 'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'}, - } - - def __init__( - self, - *, - file_permission_copy_mode: Optional[Union[str, "PermissionCopyModeType"]] = None, - ignore_read_only: Optional[bool] = None, - file_attributes: Optional[str] = None, - file_creation_time: Optional[str] = None, - file_last_write_time: Optional[str] = None, - set_archive_attribute: Optional[bool] = None, - **kwargs - ): - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_permission_copy_mode = file_permission_copy_mode - self.ignore_read_only = ignore_read_only - self.file_attributes = file_attributes - self.file_creation_time = file_creation_time - self.file_last_write_time = file_last_write_time - self.set_archive_attribute = set_archive_attribute - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. 
Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user age sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - *, - allowed_origins: str, - allowed_methods: str, - allowed_headers: str, - exposed_headers: str, - max_age_in_seconds: int, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class DirectoryItem(msrest.serialization.Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(DirectoryItem, self).__init__(**kwargs) - self.name = name - - -class FileHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param file_content_type: Sets the MIME content type of the file. The default type is - 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service stores this value - but does not use or modify it. 
- :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition header. - :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': 'fileContentType', 'type': 'str'}, - 'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'}, - 'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'}, - 'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'}, - 'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'}, - 'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - file_content_type: Optional[str] = None, - file_content_encoding: Optional[str] = None, - file_content_language: Optional[str] = None, - file_cache_control: Optional[str] = None, - file_content_md5: Optional[bytearray] = None, - file_content_disposition: Optional[str] = None, - **kwargs - ): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = file_content_type - self.file_content_encoding = file_content_encoding - self.file_content_language = file_content_language - self.file_cache_control = file_cache_control - self.file_content_md5 = file_content_md5 - self.file_content_disposition = file_content_disposition - - -class FileItem(msrest.serialization.Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param properties: Required. File properties. - :type properties: ~azure.storage.fileshare.models.FileProperty - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - } - _xml_map = { - 'name': 'File' - } - - def __init__( - self, - *, - name: str, - properties: "FileProperty", - **kwargs - ): - super(FileItem, self).__init__(**kwargs) - self.name = name - self.properties = properties - - -class FileProperty(msrest.serialization.Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value may not be up-to-date - since an SMB client may have modified the file locally. The value of Content-Length may not - reflect that fact until the handle is closed or the op-lock is broken. To retrieve current - property values, call Get File Properties. - :type content_length: long - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - } - - def __init__( - self, - *, - content_length: int, - **kwargs - ): - super(FileProperty, self).__init__(**kwargs) - self.content_length = content_length - - -class FileRange(msrest.serialization.Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long'}, - 'end': {'key': 'End', 'type': 'long'}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(FileRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class FilesAndDirectoriesListSegment(msrest.serialization.Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. - :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]'}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__( - self, - *, - directory_items: List["DirectoryItem"], - file_items: List["FileItem"], - **kwargs - ): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = directory_items - self.file_items = file_items - - -class HandleItem(msrest.serialization.Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID. - :type handle_id: str - :param path: Required. File or directory name including full path starting from share root. - :type path: str - :param file_id: Required. FileId uniquely identifies the file or directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file handle was opened. - :type session_id: str - :param client_ip: Required. Client IP that opened the handle. - :type client_ip: str - :param open_time: Required. Time when the session that previously opened the handle has last - been reconnected. (UTC). - :type open_time: ~datetime.datetime - :param last_reconnect_time: Time handle was last connected to (UTC). 
- :type last_reconnect_time: ~datetime.datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str'}, - 'path': {'key': 'Path', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'parent_id': {'key': 'ParentId', 'type': 'str'}, - 'session_id': {'key': 'SessionId', 'type': 'str'}, - 'client_ip': {'key': 'ClientIp', 'type': 'str'}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__( - self, - *, - handle_id: str, - path: str, - file_id: str, - session_id: str, - client_ip: str, - open_time: datetime.datetime, - parent_id: Optional[str] = None, - last_reconnect_time: Optional[datetime.datetime] = None, - **kwargs - ): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = handle_id - self.path = path - self.file_id = file_id - self.parent_id = parent_id - self.session_id = session_id - self.client_ip = client_ip - self.open_time = open_time - self.last_reconnect_time = last_reconnect_time - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. Abstract for entries that can be listed from Directory. - :type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - share_name: str, - directory_path: str, - prefix: str, - segment: "FilesAndDirectoriesListSegment", - next_marker: str, - share_snapshot: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - **kwargs - ): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.share_name = share_name - self.share_snapshot = share_snapshot - self.directory_path = directory_path - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - - -class ListHandlesResponse(msrest.serialization.Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - next_marker: str, - handle_list: Optional[List["HandleItem"]] = None, - **kwargs - ): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = handle_list - self.next_marker = next_marker - - -class ListSharesResponse(msrest.serialization.Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItemInternal] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - next_marker: str, - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - share_items: Optional[List["ShareItemInternal"]] = None, - **kwargs - ): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.share_items = share_items - self.next_marker = next_marker - - -class Metrics(msrest.serialization.Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: The retention policy. - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - version: str, - enabled: bool, - include_apis: Optional[bool] = None, - retention_policy: Optional["RetentionPolicy"] = None, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class RetentionPolicy(msrest.serialization.Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the File service. - If false, metrics data is retained, and the user is responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be retained. All data older - than this value will be deleted. Metrics data is deleted on a best-effort basis after the - retention period expires. 
- :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - } - - def __init__( - self, - *, - enabled: bool, - days: Optional[int] = None, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class ShareFileRangeList(msrest.serialization.Model): - """The list of file ranges. - - :param ranges: - :type ranges: list[~azure.storage.fileshare.models.FileRange] - :param clear_ranges: - :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] - """ - - _attribute_map = { - 'ranges': {'key': 'Ranges', 'type': '[FileRange]'}, - 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'}, - } - - def __init__( - self, - *, - ranges: Optional[List["FileRange"]] = None, - clear_ranges: Optional[List["ClearRange"]] = None, - **kwargs - ): - super(ShareFileRangeList, self).__init__(**kwargs) - self.ranges = ranges - self.clear_ranges = clear_ranges - - -class ShareItemInternal(msrest.serialization.Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a share. - :type properties: ~azure.storage.fileshare.models.SharePropertiesInternal - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__( - self, - *, - name: str, - properties: "SharePropertiesInternal", - snapshot: Optional[str] = None, - deleted: Optional[bool] = None, - version: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(ShareItemInternal, self).__init__(**kwargs) - self.name = name - self.snapshot = snapshot - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class SharePermission(msrest.serialization.Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor Definition Language - (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__( - self, - *, - permission: str, - **kwargs - ): - super(SharePermission, self).__init__(**kwargs) - self.permission = permission - - -class SharePropertiesInternal(msrest.serialization.Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param quota: Required. 
- :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_m_bps: - :type provisioned_ingress_m_bps: int - :param provisioned_egress_m_bps: - :type provisioned_egress_m_bps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: ~datetime.datetime - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: - :type access_tier: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param access_tier_transition_state: - :type access_tier_transition_state: str - :param lease_status: The current lease status of the share. Possible values include: "locked", - "unlocked". - :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType - :param lease_state: Lease state of the share. Possible values include: "available", "leased", - "expired", "breaking", "broken". - :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType - :param lease_duration: When a share is leased, specifies whether the lease is of infinite or - fixed duration. Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType - :param enabled_protocols: - :type enabled_protocols: str - :param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash". - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'quota': {'key': 'Quota', 'type': 'int'}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'}, - 'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'}, - 'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'}, - 'root_squash': {'key': 'RootSquash', 'type': 'str'}, - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - quota: int, - provisioned_iops: Optional[int] = None, - provisioned_ingress_m_bps: Optional[int] = None, - provisioned_egress_m_bps: Optional[int] = None, - next_allowed_quota_downgrade_time: Optional[datetime.datetime] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - access_tier_transition_state: Optional[str] = None, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: 
Optional[Union[str, "LeaseDurationType"]] = None, - enabled_protocols: Optional[str] = None, - root_squash: Optional[Union[str, "ShareRootSquash"]] = None, - **kwargs - ): - super(SharePropertiesInternal, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.quota = quota - self.provisioned_iops = provisioned_iops - self.provisioned_ingress_m_bps = provisioned_ingress_m_bps - self.provisioned_egress_m_bps = provisioned_egress_m_bps - self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_change_time = access_tier_change_time - self.access_tier_transition_state = access_tier_transition_state - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.enabled_protocols = enabled_protocols - self.root_squash = root_squash - - -class ShareProtocolSettings(msrest.serialization.Model): - """Protocol settings. - - :param smb: Settings for SMB protocol. - :type smb: ~azure.storage.fileshare.models.ShareSmbSettings - """ - - _attribute_map = { - 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings', 'xml': {'name': 'SMB'}}, - } - - def __init__( - self, - *, - smb: Optional["ShareSmbSettings"] = None, - **kwargs - ): - super(ShareProtocolSettings, self).__init__(**kwargs) - self.smb = smb - - -class ShareSmbSettings(msrest.serialization.Model): - """Settings for SMB protocol. - - :param multichannel: Settings for SMB Multichannel. - :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel - """ - - _attribute_map = { - 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'}, - } - - def __init__( - self, - *, - multichannel: Optional["SmbMultichannel"] = None, - **kwargs - ): - super(ShareSmbSettings, self).__init__(**kwargs) - self.multichannel = multichannel - - -class ShareStats(msrest.serialization.Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that - this value may not include all recently created or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'}, - } - - def __init__( - self, - *, - share_usage_bytes: int, - **kwargs - ): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = share_usage_bytes - - -class SignedIdentifier(msrest.serialization.Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - - def __init__( - self, - *, - id: str, - access_policy: Optional["AccessPolicy"] = None, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SmbMultichannel(msrest.serialization.Model): - """Settings for SMB multichannel. - - :param enabled: If SMB multichannel is enabled. 
- :type enabled: bool - """ - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Multichannel' - } - - def __init__( - self, - *, - enabled: Optional[bool] = None, - **kwargs - ): - super(SmbMultichannel, self).__init__(**kwargs) - self.enabled = enabled - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching - crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a - matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'}, - 'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'}, - } - - def __init__( - self, - *, - source_if_match_crc64: Optional[bytearray] = None, - source_if_none_match_crc64: Optional[bytearray] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = source_if_match_crc64 - self.source_if_none_match_crc64 = source_if_none_match_crc64 - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - message: Optional[str] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for - files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in minute aggregates for - files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.fileshare.models.CorsRule] - :param protocol: Protocol settings. - :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings', 'xml': {'name': 'ProtocolSettings'}}, - } - - def __init__( - self, - *, - hour_metrics: Optional["Metrics"] = None, - minute_metrics: Optional["Metrics"] = None, - cors: Optional[List["CorsRule"]] = None, - protocol: Optional["ShareProtocolSettings"] = None, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.protocol = protocol diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/__init__.py deleted file mode 100644 index ba8fb22..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_directory_operations.py deleted file mode 100644 index 20dd7c0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,751 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, Optional, TypeVar - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a new directory under the specified share or parent directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. 
- :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all system properties for the specified directory, and can also be used to check the - existence of a directory. The data returned does not include the files in the directory or any - subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def 
delete( - self, - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Removes the specified empty directory. Note that the directory must be empty before it can be - deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def set_properties( - self, - timeout=None, # type: Optional[int] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties on the directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. 
- :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def list_files_and_directories_segment( - self, - prefix=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListFilesAndDirectoriesSegmentResponse" - """Returns a list of files or directories under the specified share or directory. It lists the - contents only for a single level of the directory hierarchy. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListFilesAndDirectoriesSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListFilesAndDirectoriesSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def list_handles( - self, - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - recursive=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListHandlesResponse" - """Lists handles for directory. 
- - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. - :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def force_close_handles( - self, - handle_id, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - recursive=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> None - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. 
- :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_file_operations.py deleted file mode 100644 index 8c37bb7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_file_operations.py +++ /dev/null @@ -1,1791 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class FileOperations(object): - """FileOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - file_content_length, # type: int - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - file_http_headers=None, # type: Optional["_models.FileHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a new file or replaces a file. Note it only initializes the file with no content. - - :param file_content_length: Specifies the maximum size for the file, up to 4 TB. - :type file_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. 
‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - file_type_constant = "file" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("file_type_constant", file_type_constant, 'str') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = 
self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def download( - self, - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - range_get_content_md5=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """Reads or downloads a file from the system, including its metadata and properties. - - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and specified together with the - Range header, the service returns the MD5 hash for the range, as long as the range is less than - or equal to 4 MB in size. - :type range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', 
response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', 
response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all user-defined metadata, standard HTTP properties, and system properties for the - file. It does not return the content of the file. 
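# --- Editorial aside (not part of the deleted generated code) -----------------
# The generated download/get_properties operations in this hunk are normally
# reached through the public azure-storage-file-share client. A minimal sketch,
# assuming a placeholder connection string and an existing share/file:
from azure.storage.fileshare import ShareFileClient

client = ShareFileClient.from_connection_string(
    "<connection-string>", share_name="myshare", file_path="dir/report.txt"
)

# Ranged GET: offset/length drive the x-ms-range header assembled by download().
data = client.download_file(offset=0, length=4 * 1024 * 1024).readall()

# HEAD request mapped to the generated get_properties() operation.
props = client.get_file_properties()
print(props.size, props.etag)
# -------------------------------------------------------------------------------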
- - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-type']=self._deserialize('str', response.headers.get('x-ms-type')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', 
response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
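# --- Editorial aside (not part of the deleted generated code) -----------------
# delete, together with the set_http_headers/set_metadata operations further down
# in this hunk, is surfaced as small helpers on the public client. A sketch with
# placeholder values:
from azure.storage.fileshare import ShareFileClient, ContentSettings

client = ShareFileClient.from_connection_string(
    "<connection-string>", share_name="myshare", file_path="dir/report.txt"
)

client.set_http_headers(content_settings=ContentSettings(content_type="text/plain"))  # PUT ?comp=properties
client.set_file_metadata({"department": "billing"})                                   # PUT ?comp=metadata
client.delete_file()                                                                   # DELETE, expects 202
# -------------------------------------------------------------------------------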
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def set_http_headers( - self, - timeout=None, # type: Optional[int] - file_content_length=None, # type: Optional[int] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - file_http_headers=None, # type: Optional["_models.FileHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets HTTP headers on the file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If the specified byte value - is less than the current size of the file, then all ranges above the specified byte value are - cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. 
This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - 
header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return 
cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': 
'/{shareName}/{directory}/{fileName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, 
response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = 
pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. 
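# --- Editorial aside (not part of the deleted generated code) -----------------
# The acquire/release/change/break lease operations in this hunk are wrapped by
# ShareLeaseClient in the public SDK. A sketch of the lifecycle; the connection
# string and the proposed lease ID are placeholders chosen for illustration:
from azure.storage.fileshare import ShareFileClient

client = ShareFileClient.from_connection_string(
    "<connection-string>", share_name="myshare", file_path="dir/report.txt"
)

lease = client.acquire_lease()                    # PUT ?comp=lease, x-ms-lease-action: acquire
client.set_file_metadata({"owner": "etl-job"}, lease=lease)  # mutating calls send x-ms-lease-id
lease.change(proposed_lease_id="6c75dbc9-8f9b-4a4e-9f6a-0d9c0f2b8a11")  # action: change
lease.release()                                   # action: release
# A stuck lease can be force-cleared with lease.break_lease() (action: break).
# -------------------------------------------------------------------------------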
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = 
{'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def upload_range( - self, - range, # type: str - content_length, # type: int - optionalbody, # type: IO - timeout=None, # type: Optional[int] - file_range_write="update", # type: Union[str, "_models.FileRangeWriteType"] - content_md5=None, # type: Optional[bytearray] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the start and end of the range - must be specified. For an update operation, the range can be up to 4 MB in size. For a clear - operation, the range can be up to the value of the file's full size. The File service accepts - only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be - specified in the following format: bytes=startByte-endByte. - :type range: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param optionalbody: Initial data. - :type optionalbody: IO - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param file_range_write: Specify one of the following options: - Update: Writes the bytes - specified by the request body into the specified range. The Range and Content-Length headers - must match to perform the update. - Clear: Clears the specified range and releases the space - used in storage for that range. To clear a range, set the Content-Length header to zero, and - set the Range header to a value that indicates the range to clear, up to maximum file size. - :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType - :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of - the data during transport. When the Content-MD5 header is specified, the File service compares - the hash of the content that has arrived with the header value that was sent. If the two hashes - do not match, the operation will fail with error code 400 (Bad Request). - :type content_md5: bytearray - :param lease_access_conditions: Parameter group. 
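# --- Editorial aside (not part of the deleted generated code) -----------------
# upload_range implements the two x-ms-write modes (update / clear). Through the
# public client the same PUT ?comp=range calls look roughly like this; sizes and
# connection details are placeholders:
from azure.storage.fileshare import ShareFileClient

client = ShareFileClient.from_connection_string(
    "<connection-string>", share_name="myshare", file_path="dir/report.txt"
)

client.create_file(size=1024)                             # file must already exist at full size
chunk = b"x" * 512
client.upload_range(chunk, offset=0, length=len(chunk))   # x-ms-write: update
client.clear_range(offset=512, length=512)                # x-ms-write: clear, Content-Length: 0
# -------------------------------------------------------------------------------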
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "range" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_range.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = optionalbody - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - 
upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def upload_range_from_url( - self, - range, # type: str - copy_source, # type: str - content_length, # type: int - timeout=None, # type: Optional[int] - source_range=None, # type: Optional[str] - source_content_crc64=None, # type: Optional[bytearray] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Upload a range of bytes to a file where the contents are read from a URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_content_crc64: bytearray - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
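# --- Editorial aside (not part of the deleted generated code) -----------------
# A sketch of the server-side range copy described here, via the public
# upload_range_from_url helper. The source URL must carry its own authorization
# (for example a SAS token) unless it is publicly readable; every value below is
# a placeholder:
from azure.storage.fileshare import ShareFileClient

dest = ShareFileClient.from_connection_string(
    "<connection-string>", share_name="myshare", file_path="dir/copy-target.bin"
)

source_url = "https://<account>.file.core.windows.net/src-share/source.bin?<sas-token>"
# Copies source bytes 0..1048575 into the same range of the destination file
# (x-ms-copy-source / x-ms-source-range / x-ms-range in the generated operation).
dest.upload_range_from_url(source_url, offset=0, length=1024 * 1024, source_offset=0)
# -------------------------------------------------------------------------------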
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_match_crc64 = None - _source_if_none_match_crc64 = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - comp = "range" - accept = "application/xml" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - if _source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", _source_if_match_crc64, 'bytearray') - if _source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", _source_if_none_match_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) 
- response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def get_range_list( - self, - sharesnapshot=None, # type: Optional[str] - prevsharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ShareFileRangeList" - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, - when present, specifies the previous snapshot. - :type prevsharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, inclusively. - :type range: str - :param lease_access_conditions: Parameter group. 
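# --- Editorial aside (not part of the deleted generated code) -----------------
# The comp=rangelist query is exposed as get_ranges() on the public client and
# returns the valid (non-cleared) ranges as start/end offsets. Connection details
# are placeholders, as in the earlier sketches:
from azure.storage.fileshare import ShareFileClient

client = ShareFileClient.from_connection_string(
    "<connection-string>", share_name="myshare", file_path="dir/report.txt"
)

for r in client.get_ranges(offset=0, length=1024 * 1024):  # GET ?comp=rangelist
    print(r["start"], r["end"])
# -------------------------------------------------------------------------------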
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareFileRangeList, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareFileRangeList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareFileRangeList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "rangelist" - accept = "application/xml" - - # Construct URL - url = self.get_range_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if prevsharesnapshot is not None: - query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-content-length']=self._deserialize('long', response.headers.get('x-ms-content-length')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareFileRangeList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def start_copy( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - file_permission="inherit", # type: 
Optional[str] - file_permission_key=None, # type: Optional[str] - copy_file_smb_info=None, # type: Optional["_models.CopyFileSmbInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else x-ms-file- - permission-key header shall be used. Default value: Inherit. If SDDL is specified as input, it - must have owner, group and dacl. Note: Only one of the x-ms-file-permission or x-ms-file- - permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param copy_file_smb_info: Parameter group. - :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_permission_copy_mode = None - _ignore_read_only = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - _set_archive_attribute = None - _lease_id = None - if copy_file_smb_info is not None: - _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - _ignore_read_only = copy_file_smb_info.ignore_read_only - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - _set_archive_attribute = copy_file_smb_info.set_archive_attribute - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.start_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if _file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", _file_permission_copy_mode, 'str') - if _ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", _ignore_read_only, 'bool') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if _set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", _set_archive_attribute, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def abort_copy( - self, - copy_id, # type: str - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Aborts a pending Copy File operation, and leaves a destination file with zero length and full - metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
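(Aside.) The ``start_copy`` / ``abort_copy`` pair deleted above corresponds to ``ShareFileClient.start_copy_from_url`` and ``ShareFileClient.abort_copy`` on the public client. A hedged sketch, assuming the source URL is readable by the service (public, or carrying a SAS); all names below are placeholders::

    from azure.storage.fileshare import ShareFileClient

    conn_str = "<connection-string>"  # placeholder
    source_url = "https://acct.file.core.windows.net/src/big.bin?<sas>"  # placeholder

    dest = ShareFileClient.from_connection_string(
        conn_str, share_name="myshare", file_path="big-copy.bin")

    # PUT with x-ms-copy-source; the response carries x-ms-copy-id / x-ms-copy-status.
    copy = dest.start_copy_from_url(source_url)

    # Aborting (comp=copy, x-ms-copy-action: abort) only applies to a pending copy
    # and, as the docstring above notes, leaves a zero-length destination file.
    if copy.get("copy_status") == "pending":
        dest.abort_copy(copy["copy_id"])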
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def list_handles( - self, - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListHandlesResponse" - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def force_close_handles( - self, - handle_id, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) 
-> None - """Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - 
response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_service_operations.py deleted file mode 100644 index 94f79e7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_service_operations.py +++ /dev/null @@ -1,276 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def set_properties( - self, - storage_service_properties, # type: "_models.StorageServiceProperties" - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for a storage account's File service endpoint, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. 
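(Aside.) The ``list_handles`` / ``force_close_handles`` operations removed just above surface publicly as ``ShareFileClient.list_handles`` and ``ShareFileClient.close_all_handles``. A sketch under the same placeholder assumptions as the earlier snippets::

    from azure.storage.fileshare import ShareFileClient

    conn_str = "<connection-string>"  # placeholder

    file_client = ShareFileClient.from_connection_string(
        conn_str, share_name="myshare", file_path="dir/data.bin")

    # comp=listhandles: enumerate the SMB handles currently open on the file.
    for handle in file_client.list_handles():
        print(handle.id)  # the x-ms-handle-id value

    # comp=forceclosehandles with x-ms-handle-id: '*' closes every open handle;
    # the return value summarises how many handles were closed or failed.
    print(file_client.close_all_handles())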
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceProperties" - """Gets the properties of a storage account's File service, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - def list_shares_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListSharesIncludeType"]]] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListSharesResponse" - """The List Shares Segment operation returns a list of the shares and share snapshots under the - specified account. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
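(Aside.) ``ServiceOperations.set_properties`` and ``get_properties`` back the account-level ``ShareServiceClient.set_service_properties`` and ``get_service_properties`` calls. A sketch that round-trips the analytics settings described above; the three-day retention window is an arbitrary example value::

    from azure.storage.fileshare import (
        Metrics, RetentionPolicy, ShareServiceClient)

    conn_str = "<connection-string>"  # placeholder
    service = ShareServiceClient.from_connection_string(conn_str)

    # restype=service&comp=properties (PUT): enable hourly metrics with a
    # three-day retention policy.
    metrics = Metrics(enabled=True, include_apis=True,
                      retention_policy=RetentionPolicy(enabled=True, days=3))
    service.set_service_properties(hour_metrics=metrics)

    # restype=service&comp=properties (GET): read the settings back.
    props = service.get_service_properties()
    print(props["hour_metrics"])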
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListSharesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListSharesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_shares_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListSharesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_shares_segment.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_share_operations.py deleted file mode 100644 index 6f1ebed..0000000 --- 
a/azure/multiapi/storagev2/fileshare/v2020_04_08/_generated/operations/_share_operations.py +++ /dev/null @@ -1,1506 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ShareOperations(object): - """ShareOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - quota=None, # type: Optional[int] - access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]] - enabled_protocols=None, # type: Optional[str] - root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a new share under the specified account. If the share with the same name already - exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param enabled_protocols: Protocols to enable on the share. - :type enabled_protocols: str - :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
- :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if enabled_protocols is not None: - header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all user-defined metadata and system properties for the specified share or share - snapshot. The data returned does not include the share's list of files. 
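(Aside.) ``ShareOperations.create`` and the share-level ``get_properties`` removed here are what ``ShareClient.create_share`` and ``ShareClient.get_share_properties`` call under the hood. A minimal sketch; the 5 GiB quota is an arbitrary example::

    from azure.storage.fileshare import ShareClient

    conn_str = "<connection-string>"  # placeholder
    share = ShareClient.from_connection_string(conn_str, share_name="myshare")

    # restype=share (PUT) with x-ms-share-quota; fails with 409 if the share exists.
    share.create_share(quota=5)

    # restype=share (GET) returns the headers listed above (quota, lease state, ...).
    props = share.get_share_properties()
    print(props.quota, props.last_modified)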
- - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota')) - response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')) - response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')) - 
response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')) - response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')) - response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols')) - response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}'} # type: ignore - - def delete( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Operation marks the specified share or share snapshot for deletion. The share or share snapshot - and any files contained within it are later deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the base share and all of its - snapshots. - :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param lease_access_conditions: Parameter group. 
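(Aside.) ``ServiceOperations.list_shares_segment`` and ``ShareOperations.delete`` map to ``ShareServiceClient.list_shares`` and ``delete_share`` on the public clients. A sketch that lists shares by prefix and deletes one of them, with placeholders as before::

    from azure.storage.fileshare import ShareServiceClient

    conn_str = "<connection-string>"  # placeholder
    service = ShareServiceClient.from_connection_string(conn_str)

    # comp=list with prefix=...; paging re-issues the request with the returned marker.
    for share in service.list_shares(name_starts_with="scratch-"):
        print(share.name)

    # restype=share (DELETE); delete_snapshots mirrors the x-ms-delete-snapshots header.
    service.delete_share("scratch-old", delete_snapshots=True)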
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. 
A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, 
stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
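(Aside, stated as an assumption.) The share lease operations being removed (``acquire_lease``, ``release_lease``, ``change_lease``, ``renew_lease``) are exposed, in SDK versions that support share leases, through ``ShareClient.acquire_lease`` and the ``ShareLeaseClient`` it returns. A sketch under that assumption::

    from azure.storage.fileshare import ShareClient

    conn_str = "<connection-string>"  # placeholder
    share = ShareClient.from_connection_string(conn_str, share_name="myshare")

    # comp=lease&restype=share with x-ms-lease-action: acquire; -1 requests an
    # infinite lease, otherwise 15-60 seconds as the docstring above states.
    lease = share.acquire_lease(lease_duration=-1)
    print(lease.id)  # the x-ms-lease-id returned by the service

    # x-ms-lease-action: release frees the lock; change and renew work analogously
    # via lease.change(proposed_lease_id) and lease.renew().
    lease.release()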
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any 
- ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "renew" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a fixed- - duration lease breaks after the remaining lease period elapses, and an infinite lease breaks - immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, 
response_headers) - - break_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def create_snapshot( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{shareName}'} # type: ignore - - def create_permission( - self, - share_permission, # type: "_models.SharePermission" - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the share level. 
- :type share_permission: ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/xml" - - # Construct URL - url = self.create_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(share_permission, 'SharePermission') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_permission.metadata = {'url': '/{shareName}'} # type: ignore - - def get_permission( - self, - file_permission_key, # type: str - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.SharePermission" - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SharePermission, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - accept = "application/json" - - # Construct URL - url = self.get_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('SharePermission', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} # type: ignore - - def set_properties( - self, - timeout=None, # type: Optional[int] - quota=None, # type: Optional[int] - access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]] - root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
- :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) 
-> None - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}'} # type: ignore - - def get_access_policy( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) 
-> List["_models.SignedIdentifier"] - """Returns information about stored access policies specified on the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - def set_access_policy( - self, - timeout=None, # type: Optional[int] - share_acl=None, # type: Optional[List["_models.SignedIdentifier"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: 
Any - ): - # type: (...) -> None - """Sets a stored access policy for use with shared access signatures. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param share_acl: The ACL for the share. - :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - def get_statistics( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ShareStats" - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareStats, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - 
return deserialized - get_statistics.metadata = {'url': '/{shareName}'} # type: ignore - - def restore( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - deleted_share_name=None, # type: Optional[str] - deleted_share_version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting Timeouts for File Service - Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_share_name: Specifies the name of the previously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the previously-deleted share. - :type deleted_share_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{shareName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_lease.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_lease.py deleted file mode 100644 index 7c38145..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_lease.py +++ /dev/null @@ -1,237 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.exceptions import HttpResponseError - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated.operations import FileOperations, ShareOperations - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - ShareClient = TypeVar("ShareClient") - - -class ShareLeaseClient(object): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareClient or ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file or share to lease. - :type client: ~azure.storage.fileshare.ShareFileClient or - ~azure.storage.fileshare.ShareClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[ShareFileClient, ShareClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'file_name'): - self._client = client._client.file # type: ignore # pylint: disable=protected-access - self._snapshot = None - elif hasattr(client, 'share_name'): - self._client = client._client.share - self._snapshot = client.snapshot - else: - raise TypeError("Lease must use ShareFileClient or ShareClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, **kwargs): - # type: (**Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file or share for write and delete operations. 
If the file or share does not have an active lease, - the File or Share service creates a lease on the file or share. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file or share does not have an active lease, the File or Share service creates a - lease on the file and returns a new lease ID. - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be - between 15 and 60 seconds. A share lease duration cannot be changed - using renew or change. Default is -1 (infinite share lease). - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - lease_duration = kwargs.pop('lease_duration', -1) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the share lease. - - The share lease can be renewed if the lease ID specified in the - lease client matches that associated with the share. Note that - the lease may be renewed even if it has expired as long as the share - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - if isinstance(self._client, FileOperations): - raise TypeError("Lease renewal operations are only valid for ShareClient.") - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - sharesnapshot=self._snapshot, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the share or file. Releasing the lease allows another client to immediately acquire - the lease for the share or file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. 
A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File or Share service will raise an error - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, **kwargs): - # type: (Any) -> int - """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int lease_break_period: - This is the proposed duration of seconds that the share lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the share lease. If longer, the time remaining on the share lease is used. - A new share lease will not be available before the break period has - expired, but the share lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration share lease breaks after the remaining share lease - period elapses, and an infinite share lease breaks immediately. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - try: - lease_break_period = kwargs.pop('lease_break_period', None) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - if isinstance(self._client, ShareOperations): - kwargs['break_period'] = lease_break_period - if isinstance(self._client, FileOperations) and lease_break_period: - raise TypeError("Setting a lease break period is only applicable to Share leases.") - - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_models.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_models.py deleted file mode 100644 index ef93743..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_models.py +++ /dev/null @@ -1,993 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._parser import _parse_datetime_from_str -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import ShareProtocolSettings as GeneratedShareProtocolSettings -from ._generated.models import ShareSmbSettings as GeneratedShareSmbSettings -from ._generated.models import SmbMultichannel as GeneratedSmbMultichannel -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import DirectoryItem - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for files. - - All required parameters must be populated in order to send to Azure. - - :keyword str version: The version of Storage Analytics to configure. - :keyword bool enabled: Required. Indicates whether metrics are enabled for the - File service. - :keyword bool include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should - persist. - """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param bool enabled: Required. Indicates whether a retention policy is enabled - for the storage service. - :param int days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. - """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. 
Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. - """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ShareSmbSettings(GeneratedShareSmbSettings): - """ Settings for the SMB protocol. - - :keyword SmbMultichannel multichannel: Sets the multichannel settings. - """ - def __init__(self, **kwargs): - self.multichannel = kwargs.get('multichannel') - if self.multichannel is None: - raise ValueError("The value 'multichannel' must be specified.") - - -class SmbMultichannel(GeneratedSmbMultichannel): - """ Settings for Multichannel. - - :keyword bool enabled: If SMB Multichannel is enabled. - """ - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled') - if self.enabled is None: - raise ValueError("The value 'enabled' must be specified.") - - -class ShareProtocolSettings(GeneratedShareProtocolSettings): - """Protocol Settings class used by the set and get service properties methods in the share service. - - Contains protocol properties of the share service such as the SMB setting of the share service. - - :keyword SmbSettings smb: Sets SMB settings. - """ - def __init__(self, **kwargs): - self.smb = kwargs.get('smb') - if self.smb is None: - raise ValueError("The value 'smb' must be specified.") - - @classmethod - def _from_generated(cls, generated): - return cls( - smb=generated.smb) - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. 
- Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ~azure.storage.fileshare.FileSasPermissions or - ~azure.storage.fileshare.ShareSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class LeaseProperties(DictMixin): - """File or Share Lease Properties. - - :ivar str status: - The lease status of the file or share. Possible values: locked|unlocked - :ivar str state: - Lease state of the file or share. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file or share is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """Used to store the content settings of a file. - - :param str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. 
- :param str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :param str content_language: - If the content_language has previously been set - for the file, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :param bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. - """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class ShareProperties(DictMixin): - """Share's properties class. - - :ivar str name: - The name of the share. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the share was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int quota: - The allocated quota. - :ivar str access_tier: - The share's access tier. - :ivar dict metadata: A dict with name_value pairs to associate with the - share as metadata. - :ivar str snapshot: - Snapshot of the share. - :ivar bool deleted: - To indicate if this share is deleted or not. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar datetime deleted: - To indicate the deleted time of the deleted share. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar str version: - To indicate the version of deleted share. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar int remaining_retention_days: - To indicate how many remaining days the deleted share will be kept. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar ~azure.storage.fileshare.models.ShareRootSquash or str root_squash: - Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :ivar list(str) protocols: - Indicates the protocols enabled on the share. The protocol can be either SMB or NFS. 
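A hedged sketch of reading the ShareProperties fields described above, reusing the share client from the earlier sketch and the get_share_properties method defined later in this patch:

props = share.get_share_properties()
print(props.name, props.quota, props.access_tier)
if props.protocols:                                    # None unless x-ms-enabled-protocols was returned
    print("protocols:", ", ".join(props.protocols))
print("lease state:", props.lease.state)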
- """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.quota = kwargs.get('x-ms-share-quota') - self.access_tier = kwargs.get('x-ms-access-tier') - self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') - self.metadata = kwargs.get('metadata') - self.snapshot = None - self.deleted = None - self.deleted_time = None - self.version = None - self.remaining_retention_days = None - self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') - self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') - self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') - self.lease = LeaseProperties(**kwargs) - self.protocols = [protocol.strip() for protocol in kwargs.get('x-ms-enabled-protocols', None).split(',')]\ - if kwargs.get('x-ms-enabled-protocols', None) else None - self.root_squash = kwargs.get('x-ms-root-squash', None) - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.quota = generated.properties.quota - props.access_tier = generated.properties.access_tier - props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time - props.metadata = generated.metadata - props.snapshot = generated.snapshot - props.deleted = generated.deleted - props.deleted_time = generated.properties.deleted_time - props.version = generated.version - props.remaining_retention_days = generated.properties.remaining_retention_days - props.provisioned_egress_mbps = generated.properties.provisioned_egress_m_bps - props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_m_bps - props.provisioned_iops = generated.properties.provisioned_iops - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.protocols = [protocol.strip() for protocol in generated.properties.enabled_protocols.split(',')]\ - if generated.properties.enabled_protocols else None - props.root_squash = generated.properties.root_squash - - return props - - -class SharePropertiesPaged(PageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - prefix=self.prefix, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class Handle(DictMixin): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :keyword str handle_id: Required. XSMB service handle ID - :keyword str path: Required. File or directory name including full path starting - from share root - :keyword str file_id: Required. FileId uniquely identifies the file or - directory. - :keyword str parent_id: ParentId uniquely identifies the parent directory of the - object. - :keyword str session_id: Required. SMB session ID in context of which the file - handle was opened - :keyword str client_ip: Required. Client IP that opened the handle - :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('handle_id') - self.path = kwargs.get('path') - self.file_id = kwargs.get('file_id') - self.parent_id = kwargs.get('parent_id') - self.session_id = kwargs.get('session_id') - self.client_ip = kwargs.get('client_ip') - self.open_time = kwargs.get('open_time') - self.last_reconnect_time = kwargs.get('last_reconnect_time') - - @classmethod - def _from_generated(cls, generated): - handle = cls() - handle.id = generated.handle_id - handle.path = generated.path - handle.file_id = generated.file_id - handle.parent_id = generated.parent_id - handle.session_id = generated.session_id - handle.client_ip = generated.client_ip - handle.open_time = generated.open_time - handle.last_reconnect_time = generated.last_reconnect_time - return handle - - -class HandlesPaged(PageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryProperties(DictMixin): - """Directory's properties class. - - :ivar str name: - The name of the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool server_encrypted: - Whether encryption is enabled. - :keyword dict metadata: A dict with name_value pairs to associate with the - directory as metadata. - :ivar change_time: Change time for the file. - :vartype change_time: str or ~datetime.datetime - :ivar creation_time: Creation time for the file. - :vartype creation_time: str or ~datetime.datetime - :ivar last_write_time: Last write time for the file. - :vartype last_write_time: str or ~datetime.datetime - :ivar file_attributes: - The file system attributes for files and directories. - :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :ivar permission_key: Key of the permission to be set for the - directory/file. - :vartype permission_key: str - :ivar file_id: Required. FileId uniquely identifies the file or - directory. - :vartype file_id: str - :ivar parent_id: ParentId uniquely identifies the parent directory of the - object. 
- :vartype parent_id: str - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.metadata = kwargs.get('metadata') - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.server_encrypted = generated.properties.server_encrypted - props.metadata = generated.metadata - return props - - -class DirectoryPropertiesPaged(PageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
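A sketch of consuming the dicts this pager yields; get_directory_client is defined on the ShareClient later in this patch, while list_directories_and_files is an assumed ShareDirectoryClient method, not part of the lines above:

directory = share.get_directory_client("reports")
for item in directory.list_directories_and_files():    # pages through DirectoryPropertiesPaged
    if item["is_directory"]:
        print("dir: ", item["name"])
    else:
        print("file:", item["name"], item.get("size"))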
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] - self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) - return self._response.next_marker or None, self.current_page - - -class FileProperties(DictMixin): - """File's properties class. - - :ivar str name: - The name of the file. - :ivar str path: - The path of the file. - :ivar str share: - The name of share. - :ivar str snapshot: - File snapshot. - :ivar int content_length: - Size of file in bytes. - :ivar dict metadata: A dict with name_value pairs to associate with the - file as metadata. - :ivar str file_type: - Type of the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - Size of file in bytes. - :ivar str content_range: - The range of bytes. - :ivar bool server_encrypted: - Whether encryption is enabled. - :ivar copy: - The copy properties. - :vartype copy: ~azure.storage.fileshare.CopyProperties - :ivar content_settings: - The content settings for the file. 
- :vartype content_settings: ~azure.storage.fileshare.ContentSettings - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.path = None - self.share = None - self.snapshot = None - self.content_length = kwargs.get('Content-Length') - self.metadata = kwargs.get('metadata') - self.file_type = kwargs.get('x-ms-type') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.content_length = generated.properties.content_length - props.metadata = generated.properties.metadata - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - return props - - -class ShareProtocols(str, Enum): - """Enabled protocols on the share""" - SMB = "SMB" - NFS = "NFS" - - -class CopyProperties(DictMixin): - """File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation. - :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source file to a destination file. 
- The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar datetime destination_snapshot: - Included if the file is incremental copy or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this file. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with - generating shared access signature operations. - - :param bool read: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :param bool create: - Create a new file or copy a file to a new file. - :param bool write: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - :param bool delete: - Delete the file. - """ - def __init__(self, read=False, create=False, write=False, delete=False): - self.read = read - self.create = create - self.write = write - self.delete = delete - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - create permissions, you would provide a string "rc". - - :param str permission: The string which dictates the read, create, - write, or delete permissions - :return: A FileSasPermissions object - :rtype: ~azure.storage.fileshare.FileSasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - - parsed = cls(p_read, p_create, p_write, p_delete) - - return parsed - - -class ShareSasPermissions(object): - """ShareSasPermissions class to be used to be used with - generating shared access signature and access policy operations. - - :param bool read: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :param bool write: - For any file in the share, create or write content, properties or metadata. - Resize the file. 
Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. - :param bool delete: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :param bool list: - List files and directories in the share. - """ - def __init__(self, read=False, write=False, delete=False, list=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ShareSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, - delete, or list permissions - :return: A ShareSasPermissions object - :rtype: ~azure.storage.fileshare.ShareSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - - parsed = cls(p_read, p_write, p_delete, p_list) - - return parsed - -class NTFSAttributes(object): - """ - Valid set of attributes to set for file or directory. - To set attribute for directory, 'Directory' should always be enabled except setting 'None' for directory. - - :ivar bool read_only: - Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE - :ivar bool hidden: - Enable/disable 'Hidden' attribute for DIRECTORY or FILE - :ivar bool system: - Enable/disable 'System' attribute for DIRECTORY or FILE - :ivar bool none: - Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY - :ivar bool directory: - Enable/disable 'Directory' attribute for DIRECTORY - :ivar bool archive: - Enable/disable 'Archive' attribute for DIRECTORY or FILE - :ivar bool temporary: - Enable/disable 'Temporary' attribute for FILE - :ivar bool offline: - Enable/disable 'Offline' attribute for DIRECTORY or FILE - :ivar bool not_content_indexed: - Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE - :ivar bool no_scrub_data: - Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE - """ - def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False, - temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False): - - self.read_only = read_only - self.hidden = hidden - self.system = system - self.none = none - self.directory = directory - self.archive = archive - self.temporary = temporary - self.offline = offline - self.not_content_indexed = not_content_indexed - self.no_scrub_data = no_scrub_data - self._str = (('ReadOnly|' if self.read_only else '') + - ('Hidden|' if self.hidden else '') + - ('System|' if self.system else '') + - ('None|' if self.none else '') + - ('Directory|' if self.directory else '') + - ('Archive|' if self.archive else '') + - ('Temporary|' if self.temporary else '') + - ('Offline|' if self.offline else '') + - ('NotContentIndexed|' if self.not_content_indexed else '') + - ('NoScrubData|' if self.no_scrub_data else '')) - - def __str__(self): - 
concatenated_params = self._str - return concatenated_params.strip('|') - - @classmethod - def from_string(cls, string): - """Create a NTFSAttributes from a string. - - To specify permissions you can pass in a string with the - desired permissions, e.g. "ReadOnly|Hidden|System" - - :param str string: The string which dictates the permissions. - :return: A NTFSAttributes object - :rtype: ~azure.storage.fileshare.NTFSAttributes - """ - read_only = "ReadOnly" in string - hidden = "Hidden" in string - system = "System" in string - none = "None" in string - directory = "Directory" in string - archive = "Archive" in string - temporary = "Temporary" in string - offline = "Offline" in string - not_content_indexed = "NotContentIndexed" in string - no_scrub_data = "NoScrubData" in string - - parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed, - no_scrub_data) - parsed._str = string # pylint: disable = protected-access - return parsed - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. - """ - return { - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'protocol': ShareProtocolSettings._from_generated(generated.protocol), # pylint: disable=protected-access - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_parser.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_parser.py deleted file mode 100644 index db7cab5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_parser.py +++ /dev/null @@ -1,42 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import datetime, timedelta - -_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time' -_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. 
file_permission should be <=8KB, else' \ - 'please use file_permission_key' - - -def _get_file_permission(file_permission, file_permission_key, default_permission): - # if file_permission and file_permission_key are both empty, then use the default_permission - # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used - if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024: - raise ValueError(_FILE_PERMISSION_TOO_LONG) - - if not file_permission: - if not file_permission_key: - return default_permission - return None - - if not file_permission_key: - return file_permission - - raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS) - - -def _parse_datetime_from_str(string_datetime): - if not string_datetime: - return None - dt, _, us = string_datetime.partition(".") - dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") - us = int(us[:-2]) # microseconds - datetime_obj = dt + timedelta(microseconds=us) - return datetime_obj - - -def _datetime_to_str(datetime_obj): - return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z' diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_serialize.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_serialize.py deleted file mode 100644 index d8d7d26..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_serialize.py +++ /dev/null @@ -1,114 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from azure.core import MatchConditions - -from ._parser import _datetime_to_str, _get_file_permission -from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-12-12', - '2020-02-10', - '2020-04-08' -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - # TODO: extract this method to shared folder also add some comments, so that share, datalake and blob can use it. 
- if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if etag_param in kwargs: - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_access_conditions(lease): - # type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_smb_properties(kwargs): - # type: (Dict[str, Any]) -> Dict[str, Any] - ignore_read_only = kwargs.pop('ignore_read_only', None) - set_archive_attribute = kwargs.pop('set_archive_attribute', None) - file_permission = kwargs.pop('file_permission', None) - file_permission_key = kwargs.pop('permission_key', None) - file_attributes = kwargs.pop('file_attributes', None) - file_creation_time = kwargs.pop('file_creation_time', None) or "" - file_last_write_time = kwargs.pop('file_last_write_time', None) or "" - - file_permission_copy_mode = None - file_permission = _get_file_permission(file_permission, file_permission_key, None) - - if file_permission: - if file_permission.lower() == "source": - file_permission = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - elif file_permission_key: - if file_permission_key.lower() == "source": - file_permission_key = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - return { - 'file_permission': file_permission, - 'file_permission_key': file_permission_key, - 'copy_file_smb_info': CopyFileSmbInfo( - file_permission_copy_mode=file_permission_copy_mode, - ignore_read_only=ignore_read_only, - file_attributes=file_attributes, - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - set_archive_attribute=set_archive_attribute - ) - - } - -def get_api_version(kwargs, default): - # type: (Dict[str, Any]) -> str - api_version = kwargs.pop('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) - return api_version or default diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_client.py deleted file mode 100644 index 2b6e5e6..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_client.py +++ /dev/null @@ -1,897 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from ._generated import AzureFileStorage -from ._generated.models import ( - SignedIdentifier, - DeleteSnapshotsOptionType, - SharePermission) -from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission -from ._serialize import get_api_version, get_access_conditions -from ._directory_client import ShareDirectoryClient -from ._file_client import ShareFileClient -from ._lease import ShareLeaseClient -from ._models import ShareProtocols - - -if TYPE_CHECKING: - from ._models import ShareProperties, AccessPolicy - - -class ShareClient(StorageAccountHostsMixin): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. 
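A minimal construction sketch based on the parameters documented above (import as in the first sketch); the endpoint and credential are placeholders, and per get_api_version earlier in this patch an api_version outside _SUPPORTED_API_VERSIONS raises ValueError:

share = ShareClient(
    account_url="https://<account>.file.core.windows.net",
    share_name="myshare",
    credential="<account key or SAS token>",
    api_version="2019-07-07",   # optional; must be a supported service version
)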
- """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - @classmethod - def from_share_url(cls, share_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """ - :param str share_url: The full URI to the share. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - """ - try: - if not share_url.lower().startswith('http'): - share_url = "https://" + share_url - except AttributeError: - raise ValueError("Share URL must be a string.") - parsed_url = urlparse(share_url.rstrip('/')) - if not (parsed_url.path and parsed_url.netloc): - raise ValueError("Invalid URL: {}".format(share_url)) - - share_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(share_path) > 1: - account_path = "/" + "/".join(share_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - - share_name = unquote(share_path[-1]) - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - if not share_name: - raise ValueError("Invalid URL. 
Please provide a URL with a valid share name") - return cls(account_url, share_name, path_snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """Create ShareClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str snapshot: - The optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_client_from_conn_string] - :end-before: [END create_share_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Gets the share client from connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. 
- :rtype: ~azure.storage.fileshare.ShareFileClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode) - - @distributed_trace - def acquire_lease(self, **kwargs): - # type: (**Any) -> ShareLeaseClient - """Requests a new lease. - - If the share does not have an active lease, the Share - Service creates a lease on the share and returns a new lease. - - .. versionadded:: 12.5.0 - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword str lease_id: - Proposed lease ID, in a GUID string format. The Share Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START acquire_and_release_lease_on_share] - :end-before: [END acquire_and_release_lease_on_share] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a share. - """ - kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) - lease_id = kwargs.pop('lease_id', None) - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword protocols: - Protocols to enable on the share. Only one protocol can be enabled on the share. - :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 8 - :caption: Creates a file share. 
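A hedged example of the create_share call documented above, with placeholder values:

created = share.create_share(
    metadata={"owner": "docs-team"},
    quota=5,                     # size cap in GB
    access_tier="Hot",
)
print(created)                   # ETag and last-modified of the new share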
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - access_tier = kwargs.pop('access_tier', None) - timeout = kwargs.pop('timeout', None) - root_squash = kwargs.pop('root_squash', None) - protocols = kwargs.pop('protocols', None) - if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: - raise ValueError("The enabled protocol must be set to either SMB or NFS.") - if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: - raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - enabled_protocols=protocols, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 12 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 12 - :caption: Deletes the share and any snapshots. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - delete_snapshots=delete_include, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 12 - :caption: Gets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - props = self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace - def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 12 - :caption: Sets the share quota. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=None, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_share_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Sets the share properties. - - .. versionadded:: 12.4.0 - - :keyword access_tier: - Specifies the access tier of the share. 
- Possible values: 'TransactionOptimized', 'Hot', and 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :keyword int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_properties] - :end-before: [END set_share_properties] - :language: python - :dedent: 12 - :caption: Sets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - access_tier = kwargs.pop('access_tier', None) - quota = kwargs.pop('quota', None) - root_squash = kwargs.pop('root_squash', None) - if all(parameter is None for parameter in [access_tier, quota, root_squash]): - raise ValueError("set_share_properties should be called with at least one parameter.") - try: - return self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 12 - :caption: Sets the share metadata. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. 
- - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - try: - return self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :return: The approximate size of the data (in bytes) stored on the share. 
- :rtype: int - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.share.get_statistics( - timeout=timeout, - lease_access_conditions=access_conditions, - **kwargs) - return stats.share_usage_bytes # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 12 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @staticmethod - def _create_permission_for_share_options(file_permission, # type: str - **kwargs): - options = { - 'share_permission': SharePermission(permission=file_permission), - 'cls': deserialize_permission_key, - 'timeout': kwargs.pop('timeout', None), - } - options.update(kwargs) - return options - - @distributed_trace - def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return self._client.share.create_permission(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace - def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - directory = self.get_directory_client(directory_name) - directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_service_client.py deleted file mode 100644 index 600426b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_service_client.py +++ /dev/null @@ -1,424 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, List, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.response_handlers import process_storage_error -from ._generated import AzureFileStorage -from ._generated.models import StorageServiceProperties -from ._share_client import ShareClient -from ._serialize import get_api_version -from ._models import ( - SharePropertiesPaged, - service_properties_deserialize, -) - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ( - ShareProperties, - Metrics, - CorsRule, - ShareProtocolSettings - ) - - -class ShareServiceClient(StorageAccountHostsMixin): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. 
- For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File Share service.") - - _, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ShareServiceClient - """Create ShareServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A File Share service client. - :rtype: ~azure.storage.fileshare.ShareServiceClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client_from_conn_string] - :end-before: [END create_share_service_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Create the share service client with connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 8 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - protocol=None, # type: Optional[ShareProtocolSettings], - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :param protocol: - Sets protocol settings - :type protocol: ~azure.storage.fileshare.ShareProtocolSettings - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. 
- """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - protocol=protocol - ) - try: - self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ShareProperties] - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 12 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace - def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 8 - :caption: Create a share in the file share service. 
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace - def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 12 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace - def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - """ - share = self.get_share_client(deleted_share_name) - - try: - share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except HttpResponseError as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. 
- """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, - _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . 
import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. - """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 
'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client.py deleted file mode 100644 index 5e524b2..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client.py +++ /dev/null @@ -1,459 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client_async.py deleted file mode 100644 index 091c350..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client_async.py +++ /dev/null @@ -1,183 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. 
- It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/constants.py deleted file mode 100644 index 66f9a47..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/constants.py +++ /dev/null @@ -1,26 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -from .._generated import AzureFileStorage - - -X_MS_VERSION = AzureFileStorage(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. 
- if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. 
- wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. 
- :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. 
This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. 
- :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. 
- padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/models.py deleted file mode 100644 index 27cd236..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/models.py +++ /dev/null @@ -1,468 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.fileshare.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.fileshare.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.fileshare.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. 
- - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/policies.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/policies.py deleted file mode 100644 index c9bc798..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/policies.py +++ /dev/null @@ -1,610 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
- ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
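The ExponentialRetry docstring above describes delays of ``initial_backoff + increment_base**count`` seconds, jittered by ±``random_jitter_range``. A quick standalone check of that schedule with the default constructor values::

    import random

    def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
        """Seconds to wait before retry number `count` (0-based), with jitter."""
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        low = backoff - jitter if backoff > jitter else 0
        return random.uniform(low, backoff + jitter)

    # Nominal (un-jittered) delays for retries 0..3: 15, 18, 24, 42 seconds.
    print([round(exponential_backoff(n), 1) for n in range(4)])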
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
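``AsyncStorageResponseHook`` derives the total download size from the ``Content-Range`` header when the caller did not supply one; the parsing amounts to the snippet below (the header values are just examples)::

    def total_from_content_range(content_range):
        """'bytes 0-3/65537' -> 65537, the full size of the resource."""
        return int(content_range.split(" ", 1)[1].split("/", 1)[1])

    downloaded = 0
    headers = {"Content-Length": "4", "Content-Range": "bytes 0-3/65537"}
    downloaded += int(headers.get("Content-Length", 0))
    total = total_from_content_range(headers["Content-Range"])
    print(downloaded, total)  # 4 65537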
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/request_handlers.py deleted file mode 100644 index 37354d7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/request_handlers.py +++ /dev/null @@ -1,273 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
<header key>: <header value>
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/response_handlers.py deleted file mode 100644 index 006913f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/response_handlers.py +++ /dev/null @@ -1,159 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
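Each batch sub-request above is framed as its own ``application/http`` part with CRLF line endings; the shape of one part is roughly the following (method, URL and header values are placeholders, not taken from the original code)::

    CRLF = "\r\n"

    def frame_sub_request(content_id, method, url, headers):
        """Serialize one batch sub-request as an application/http part."""
        lines = [
            "Content-Type: application/http",
            "Content-ID: {}".format(content_id),
            "Content-Transfer-Encoding: binary",
            "",                                    # blank line before the HTTP message
            "{} {} HTTP/1.1".format(method, url),
        ]
        lines += ["{}: {}".format(k, v) for k, v in headers.items() if v is not None]
        lines += ["", ""]                          # terminate headers; empty body
        return CRLF.join(lines).encode("utf-8")

    print(frame_sub_request(0, "DELETE", "/container/blob-0",
                            {"Content-Length": "0"}).decode())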
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): - raise_error = HttpResponseError - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - if error_body: - for info in error_body.iter(): - if info.tag.lower() == 'code': - error_code = info.text - elif info.tag.lower() == 'message': - error_message = info.text - else: - additional_data[info.tag] = info.text - except DecodeError: - pass - - try: - if error_code: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in 
additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - error = raise_error(message=error_message, response=storage_error.response) - error.error_code = error_code - error.additional_info = additional_data - error.raise_with_traceback() - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . 
import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. 
The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads.py deleted file mode 100644 index 1b619df..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads.py +++ /dev/null @@ -1,602 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
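``add_account_signature`` concatenates the SAS fields with newlines and signs the result. ``sign_string`` itself is imported from the shared helpers and is not shown in this hunk, so the sketch below assumes the usual Azure Storage scheme of base64(HMAC-SHA256(base64-decoded key, UTF-8 string)); account name, key and field values are dummies::

    import base64
    import hashlib
    import hmac
    from urllib.parse import quote

    def sign_string(account_key, string_to_sign):
        """Assumed helper behaviour: base64(HMAC-SHA256(decoded key, text))."""
        key = base64.b64decode(account_key)
        digest = hmac.new(key, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
        return base64.b64encode(digest).decode("utf-8")

    account_name = "myaccount"
    account_key = base64.b64encode(b"0" * 32).decode()          # dummy key
    fields = {                                                   # string-to-sign order
        "sp": "rl", "ss": "b", "srt": "sco",
        "st": "2021-01-01T00:00:00Z", "se": "2021-01-02T00:00:00Z",
        "sip": "", "spr": "https", "sv": "2020-04-08",
    }
    string_to_sign = account_name + "\n" + "".join(v + "\n" for v in fields.values())
    fields["sig"] = sign_string(account_key, string_to_sign)
    print("&".join("{}={}".format(k, quote(v)) for k, v in fields.items() if v))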
- - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if 
parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
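``_parallel_uploads`` keeps at most ``max_concurrency`` chunk uploads in flight: it seeds the executor with the first few chunks and submits one more for every future that completes. The same windowing, reduced to a toy standalone form (the uploader below is a stub)::

    from concurrent import futures
    from itertools import islice
    import time

    def upload_chunk(chunk):                      # stand-in for process_chunk
        time.sleep(0.01)
        return chunk

    def bounded_parallel_uploads(uploader, chunks, max_concurrency=4):
        pending, results = iter(chunks), []
        with futures.ThreadPoolExecutor(max_concurrency) as executor:
            running = {executor.submit(uploader, c)
                       for c in islice(pending, max_concurrency)}
            while running:
                done, running = futures.wait(running,
                                             return_when=futures.FIRST_COMPLETED)
                results.extend(f.result() for f in done)
                for _ in done:                    # refill the window
                    try:
                        running.add(executor.submit(uploader, next(pending)))
                    except StopIteration:
                        break
        return sorted(results)

    print(bounded_parallel_uploads(upload_chunk, range(10)))  # [0, 1, ..., 9]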
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): 
- try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. - try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be 
corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. - if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. 
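``SubStream`` gives each parallel block upload its own window onto the shared source stream, seeking and reading under a lock so concurrent chunks do not corrupt each other's position. Stripped of the internal read buffer, the idea is::

    import io
    import os
    import threading

    class BoundedStreamView:
        """Read-only window [start, start + length) over a shared seekable stream."""

        def __init__(self, wrapped, start, length, lock):
            self._wrapped, self._start, self._length = wrapped, start, length
            self._lock, self._pos = lock, 0

        def __len__(self):
            return self._length

        def read(self, size=-1):
            if size is None or size < 0 or size > self._length - self._pos:
                size = self._length - self._pos
            if size == 0:
                return b""
            with self._lock:                      # seek + read must be atomic
                self._wrapped.seek(self._start + self._pos, os.SEEK_SET)
                data = self._wrapped.read(size)
            self._pos += len(data)
            return data

    source = io.BytesIO(b"0123456789abcdef")
    view = BoundedStreamView(source, 4, 8, threading.Lock())
    print(view.read(5), view.read())              # b'45678' b'9ab'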
- - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' 
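The _parallel_uploads coroutine below keeps at most max_concurrency chunk uploads in flight: it seeds that many futures, waits for the first to complete, and replaces each finished future with the next pending chunk. A simplified, self-contained variant of that asyncio pattern; the upload_one coroutine and the integer work items are invented for illustration:

import asyncio
from itertools import islice

async def upload_one(item):
    # stand-in for uploader.process_chunk / process_substream_block
    await asyncio.sleep(0)
    return item

async def bounded_uploads(items, max_concurrency):
    pending = iter(items)
    # seed at most max_concurrency uploads
    running = {asyncio.ensure_future(upload_one(i)) for i in islice(pending, max_concurrency)}
    results = []
    while running:
        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
        results.extend(task.result() for task in done)
        # top the running set back up with one new upload per completed one
        for _ in done:
            try:
                running.add(asyncio.ensure_future(upload_one(next(pending))))
            except StopIteration:
                break
    return results

# asyncio.run(bounded_uploads(range(10), 3)) -> all ten results, never more than 3 in flight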
- - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # 
Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
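# The service requires block IDs to be base64 strings, so the chunk offset is
# zero-padded to a fixed 32-digit index (keeping lexical order equal to byte
# order) and then double-encoded with the shared encode_base64/url_quote
# helpers imported above (roughly base64.b64encode plus urllib.parse.quote).
# _upload_chunk returns (index, block_id); upload_data_chunks above sorts on
# the index and returns the encoded block IDs in that order for the commit.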
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = 
self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared_access_signature.py deleted file mode 100644 index 20dad95..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared_access_signature.py +++ /dev/null @@ -1,491 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, List, TYPE_CHECKING -) - -from ._shared import sign_string -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants -from ._shared.parser import _str - -if TYPE_CHECKING: - from datetime import datetime - from .import ( - ResourceTypes, - AccountSasPermissions, - ShareSasPermissions, - FileSasPermissions - ) - -class FileSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating file and share access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - ''' - super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, policy_id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. 
- :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _str(directory_name) if directory_name is not None else None - resource_path += '/' + _str(file_name) if file_name is not None else None - - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. 
- :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, share_name) - - return sas.get_token() - - -class _FileSharedAccessHelper(_SharedAccessHelper): - - def add_resource_signature(self, account_name, account_key, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/file/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. - string_to_sign = \ - (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for the file service. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param ~azure.storage.fileshare.AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 8 - :caption: Generate a sas token. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(fileshare=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_share_sas( - account_name, # type: str - share_name, # type: str - account_key, # type: str - permission=None, # type: Optional[Union[ShareSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for a share. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. 
If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - return sas.generate_share( - share_name=share_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_file_sas( - account_name, # type: str - share_name, # type: str - file_path, # type: List[str] - account_key, # type: str - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param file_path: - The file path represented as a list of path segments, including the file name. - :type file_path: List[str] - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
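For illustration, a hedged usage sketch of the module-level helpers defined in this file; the account name, key, share, and path below are placeholders, and a plain permission string is used in place of the ShareSasPermissions/FileSasPermissions models:

from datetime import datetime, timedelta

share_token = generate_share_sas(
    account_name="myaccount",            # placeholder
    share_name="myshare",                # placeholder
    account_key="<account-key>",         # placeholder
    permission="r",                      # read-only; a ShareSasPermissions instance also works
    expiry=datetime.utcnow() + timedelta(hours=1),
)

file_token = generate_file_sas(
    account_name="myaccount",
    share_name="myshare",
    file_path=["mydir", "myfile.txt"],   # path segments; the file name is the last element
    account_key="<account-key>",
    permission="r",
    expiry=datetime.utcnow() + timedelta(hours=1),
)
# Either token can then be passed as the credential when constructing a share,
# directory, or file client.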
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - if len(file_path) > 1: - dir_path = '/'.join(file_path[:-1]) - else: - dir_path = None # type: ignore - return sas.generate_file( # type: ignore - share_name=share_name, - directory_name=dir_path, - file_name=file_path[-1], - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_version.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_version.py deleted file mode 100644 index d731da5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -VERSION = "12.5.0" diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/__init__.py deleted file mode 100644 index 73393b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._file_client_async import ShareFileClient -from ._directory_client_async import ShareDirectoryClient -from ._share_client_async import ShareClient -from ._share_service_client_async import ShareServiceClient -from ._lease_async import ShareLeaseClient - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_directory_client_async.py deleted file mode 100644 index d26f28a..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_directory_client_async.py +++ /dev/null @@ -1,594 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _get_file_permission, _datetime_to_str -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_directory_properties -from .._serialize import get_api_version -from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase -from ._file_client_async import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes - from .._generated.models import HandleItem - - -class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. 
- - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareDirectoryClient, self).__init__( - account_url, - share_name=share_name, - directory_path=directory_path, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = loop - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param str file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. 
admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 16 - :caption: Gets the subdirectory client. - """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - @distributed_trace_async - async def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 16 - :caption: Creates a directory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 16 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - await self._client.directory.delete(timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], Any) -> AsyncItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 16 - :caption: List directories and files. 
- """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> AsyncItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace_async - async def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace_async - async def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. 
- :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 16 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace_async - async def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 16 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace_async - async def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) 
-> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 16 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace_async - async def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 16 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_download_async.py deleted file mode 100644 index b046fc0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_download_async.py +++ /dev/null @@ -1,486 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from typing import AsyncIterator -from azure.core.exceptions import HttpResponseError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, 
content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += await self._iter_downloader.yield_chunk(chunk) - except StopIteration: - self._complete = True - # it's likely that there is some data left in self._current_content - if self._current_content: - return self._current_content - raise StopAsyncIteration("Download complete") - - return self._get_chunk_data() - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. - :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the file. - """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved.
- self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. 
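The constructor and ``_initial_request`` above implement the first phase of the download: a single ranged GET sized by ``max_single_get_size`` (or ``max_chunk_get_size`` when content validation is on), with anything beyond that fetched lazily by the chunk downloader. As a hedged sketch of what that looks like from the caller's side, assuming an already-constructed async ``ShareFileClient`` named ``file_client``::

    async def stream_chunks(file_client):
        # download_file (defined in _file_client_async.py) performs the initial
        # ranged GET; chunks() then yields that buffered first chunk and fetches
        # the remaining ranges lazily, one max_chunk_get_size range at a time.
        downloader = await file_client.download_file()
        total = 0
        async for chunk in downloader.chunks():
            total += len(chunk)
        return total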
- if response.properties.size == self.size: - self._download_complete = True - return response - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size - ) - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." 
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_file_client_async.py deleted file mode 100644 index 6c21d22..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_file_client_async.py +++ /dev/null @@ -1,1203 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
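To make the relationship between ``readall``, ``readinto`` and the deprecated ``content_as_*``/``download_to_stream`` helpers removed above concrete, here is a hedged usage sketch; ``file_client`` is assumed to be an async ``ShareFileClient`` and the local path is a placeholder::

    async def save_to_disk(file_client, local_path="out.bin"):
        # With max_concurrency > 1, readinto drives the parallel chunk download,
        # so the target stream must be seekable (an open file handle is).
        downloader = await file_client.download_file(max_concurrency=4)
        with open(local_path, "wb") as handle:
            return await downloader.readinto(handle)

    async def read_small_file(file_client):
        downloader = await file_client.download_file()
        # readall buffers the whole file in memory before returning.
        return (await downloader.readall()).decode("utf-8")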
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method, too-many-public-methods -import functools -import time -from io import BytesIO -from typing import Optional, Union, IO, List, Tuple, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _datetime_to_str, _get_file_permission -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.models import FileHTTPHeaders -from .._shared.policies_async import ExponentialRetry -from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.request_handlers import add_metadata_headers, get_length -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result -from .._serialize import get_access_conditions, get_smb_properties, get_api_version -from .._file_client import ShareFileClient as ShareFileClientBase -from ._models import HandlesPaged -from ._lease_async import ShareLeaseClient -from ._download_async import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes - from .._generated.models import HandleItem - - -async def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs -): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = await client.create_file( - size, content_settings=content_settings, metadata=metadata, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - timeout=timeout, - **kwargs - ) - if size == 0: - return response - - responses = await upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except HttpResponseError as error: - process_storage_error(error) - - -class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. 
- :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - - def __init__( # type: ignore - self, - account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareFileClient, self).__init__( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, - credential=credential, loop=loop, **kwargs - ) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def acquire_lease(self, lease_id=None, **kwargs): - # type: (Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the file does not have an active lease, the File - Service creates a lease on the blob and returns a new lease. - - :param str lease_id: - Proposed lease ID, in a GUID string format. The File Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - kwargs['lease_duration'] = -1 - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(**kwargs) - return lease - - @distributed_trace_async - async def create_file( # type: ignore - self, - size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. 
- If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 16 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return await self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword str encoding: - Defaults to UTF-8. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 16 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, "read"): - stream = data - elif hasattr(data, "__iter__"): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return await _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs - ) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. 
This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 16 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return await self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id["copy_id"] - except TypeError: - pass - try: - await self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def download_file( - self, - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the file into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
- :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 16 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - downloader = StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs - ) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 16 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = await self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = "/".join(self.file_path) - return file_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop("size", None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.file.set_metadata( # type: ignore - metadata=metadata, lease_access_conditions=access_conditions, - timeout=timeout, cls=return_response_headers, headers=headers, **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range( # type: ignore - self, - data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. 
This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return await self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.file.upload_range_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A list of valid ranges. - :rtype: List[dict[str, int]] - """ - options = self._get_ranges_options( - offset=offset, - length=length, - **kwargs) - try: - ranges = await self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] - - @distributed_trace_async - async def get_ranges_diff( # type: ignore - self, - previous_sharesnapshot, # type: Union[str, Dict[str, Any]] - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - .. versionadded:: 12.6.0 - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :param str previous_sharesnapshot: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous file snapshot to be compared - against a more recent snapshot or the current file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. - The first element are filled file ranges, the 2nd element is cleared file ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_ranges_options( - offset=offset, - length=length, - previous_sharesnapshot=previous_sharesnapshot, - **kwargs) - try: - ranges = await self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_file_ranges_result(ranges) - - @distributed_trace_async - async def clear_range( # type: ignore - self, - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. 
- :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = "bytes={0}-{1}".format(offset, end_range) - try: - return await self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - optionalbody=None, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> AsyncItemPaged - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop("results_per_page", None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. 
- :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_lease_async.py deleted file mode 100644 index 0d99845..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_lease_async.py +++ /dev/null @@ -1,228 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
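# A minimal usage sketch of the async ShareFileClient range/handle operations
# documented above (get_ranges, close_all_handles). Assumes the equivalent
# surface in azure.storage.fileshare.aio (or a retained
# azure.multiapi.storagev2.fileshare version); the connection string, share
# name and file path are placeholders.
import asyncio

from azure.storage.fileshare.aio import ShareFileClient  # assumed equivalent surface


async def inspect_file(conn_str: str) -> None:
    file_client = ShareFileClient.from_connection_string(
        conn_str, share_name="myshare", file_path="dir/myfile.bin")  # placeholder names
    try:
        # List the valid (allocated) ranges of the file as {'start': int, 'end': int} dicts.
        ranges = await file_client.get_ranges()
        for r in ranges:
            print("range:", r["start"], "-", r["end"])

        # Close any open handles on the file; the result reports closed/failed counts.
        result = await file_client.close_all_handles()
        print("closed:", result["closed_handles_count"],
              "failed:", result["failed_handles_count"])
    finally:
        await file_client.close()


if __name__ == "__main__":
    asyncio.run(inspect_file("<connection-string>"))  # placeholder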
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._generated.aio.operations import FileOperations, ShareOperations -from .._lease import ShareLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - ShareClient = TypeVar("ShareClient") - - -class ShareLeaseClient(LeaseClientBase): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareClient or ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file or share to lease. - :type client: ~azure.storage.fileshare.ShareFileClient or - ~azure.storage.fileshare.ShareClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, **kwargs): - # type: (**Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file or share for write and delete operations. If the file or share does not have an active lease, - the File or Share service creates a lease on the file or share. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file or share does not have an active lease, the File or Share service creates a - lease on the file and returns a new lease ID. - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be - between 15 and 60 seconds. A share lease duration cannot be changed - using renew or change. Default is -1 (infinite share lease). - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - try: - lease_duration = kwargs.pop('lease_duration', -1) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the share lease. - - The share lease can be renewed if the lease ID specified in the - lease client matches that associated with the share. Note that - the lease may be renewed even if it has expired as long as the share - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - if isinstance(self._client, FileOperations): - raise TypeError("Lease renewal operations are only valid for ShareClient.") - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - sharesnapshot=self._snapshot, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the share or file. Releasing the lease allows another client to immediately acquire - the lease for the share or file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File or Share service raises an error - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, **kwargs): - # type: (Any) -> int - """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int lease_break_period: - This is the proposed duration of seconds that the share lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the share lease. If longer, the time remaining on the share lease is used. - A new share lease will not be available before the break period has - expired, but the share lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration share lease breaks after the remaining share lease - period elapses, and an infinite share lease breaks immediately. - - .. versionadded:: 12.5.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - try: - lease_break_period = kwargs.pop('lease_break_period', None) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - if isinstance(self._client, ShareOperations): - kwargs['break_period'] = lease_break_period - if isinstance(self._client, FileOperations) and lease_break_period: - raise TypeError("Setting a lease break period is only applicable to Share leases.") - - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_models.py deleted file mode 100644 index ceca247..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_models.py +++ /dev/null @@ -1,178 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
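# A minimal usage sketch of the async ShareLeaseClient documented above,
# acquiring a share lease and releasing it via the async context manager.
# Assumes the equivalent surface in azure.storage.fileshare.aio (or a retained
# azure.multiapi.storagev2.fileshare version); connection string and share
# name are placeholders.
import asyncio

from azure.storage.fileshare.aio import ShareClient  # assumed equivalent surface


async def lease_share(conn_str: str) -> None:
    share = ShareClient.from_connection_string(conn_str, share_name="myshare")  # placeholder
    try:
        # acquire_lease() returns an already-acquired ShareLeaseClient; exiting
        # the async context manager releases the lease.
        async with await share.acquire_lease(lease_duration=60) as lease:
            print("lease id:", lease.id)
            # Operations that mutate the share must now present the lease.
            await share.set_share_metadata({"owner": "demo"}, lease=lease)
    finally:
        await share.close()


if __name__ == "__main__":
    asyncio.run(lease_share("<connection-string>"))  # placeholder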
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator -from azure.core.exceptions import HttpResponseError - -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error -from .._generated.models import DirectoryItem -from .._models import Handle, ShareProperties - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class SharePropertiesPaged(AsyncPageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class HandlesPaged(AsyncPageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryPropertiesPaged(AsyncPageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [_wrap_item(i) for i in self._response.segment.directory_items] - self.current_page.extend([_wrap_item(i) for i in self._response.segment.file_items]) - return self._response.next_marker or None, self.current_page diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_client_async.py deleted file mode 100644 index 05fc93b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_client_async.py +++ /dev/null @@ -1,744 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.pipeline import AsyncPipeline -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from .._generated.aio import AzureFileStorage -from .._generated.models import ( - SignedIdentifier, - DeleteSnapshotsOptionType) -from .._deserialize import deserialize_share_properties, deserialize_permission -from .._serialize import get_api_version, get_access_conditions -from .._share_client import ShareClient as ShareClientBase -from ._directory_client_async import ShareDirectoryClient -from ._file_client_async import ShareFileClient -from ..aio._lease_async import ShareLeaseClient -from .._models import ShareProtocols - -if TYPE_CHECKING: - from .._models import ShareProperties, AccessPolicy - - -class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): - """A client to interact with a specific share, although that share may not yet exist. 
- - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareClient, self).__init__( - account_url, - share_name=share_name, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = loop - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. 
- :rtype: ~azure.storage.fileshare.aio.ShareFileClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - @distributed_trace_async() - async def acquire_lease(self, **kwargs): - # type: (**Any) -> ShareLeaseClient - """Requests a new lease. - - If the share does not have an active lease, the Share - Service creates a lease on the share and returns a new lease. - - .. versionadded:: 12.5.0 - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword str lease_id: - Proposed lease ID, in a GUID string format. The Share Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START acquire_lease_on_share] - :end-before: [END acquire_lease_on_share] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a share. - """ - kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) - lease_id = kwargs.pop('lease_id', None) - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(**kwargs) - return lease - - @distributed_trace_async - async def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword protocols: - Protocols to enable on the share. Only one protocol can be enabled on the share. - :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 12 - :caption: Creates a file share. 
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - access_tier = kwargs.pop('access_tier', None) - timeout = kwargs.pop('timeout', None) - root_squash = kwargs.pop('root_squash', None) - protocols = kwargs.pop('protocols', None) - if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: - raise ValueError("The enabled protocol must be set to either SMB or NFS.") - if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: - raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return await self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - enabled_protocols=protocols, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 16 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 16 - :caption: Deletes the share and any snapshots. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - await self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - delete_snapshots=delete_include, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world_async.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 16 - :caption: Gets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - props = await self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace_async - async def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 16 - :caption: Sets the share quota. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=None, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - async def set_share_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Sets the share properties. - - .. versionadded:: 12.3.0 - - :keyword access_tier: - Specifies the access tier of the share. 
- Possible values: 'TransactionOptimized', 'Hot', and 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :keyword int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash' - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_properties] - :end-before: [END set_share_properties] - :language: python - :dedent: 16 - :caption: Sets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - access_tier = kwargs.pop('access_tier', None) - quota = kwargs.pop('quota', None) - root_squash = kwargs.pop('root_squash', None) - if all(parameter is None for parameter in [access_tier, quota, root_squash]): - raise ValueError("set_share_properties should be called with at least one parameter.") - try: - return await self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 16 - :caption: Sets the share metadata. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. 
The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - - try: - return await self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :return: The approximate size of the data (in bytes) stored on the share. 
- :rtype: int - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.share.get_statistics( - timeout=timeout, - lease_access_conditions=access_conditions, - **kwargs) - return stats.share_usage_bytes # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( # type: ignore - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 16 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @distributed_trace_async - async def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return await self._client.share.create_permission(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - await directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace_async - async def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - directory = self.get_directory_client(directory_name) - await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_service_client_async.py deleted file mode 100644 index d6df033..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_service_client_async.py +++ /dev/null @@ -1,370 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
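# A minimal usage sketch of the async ShareClient documented above: create a
# share, add a directory, list the share contents, then delete the share.
# Assumes the equivalent surface in azure.storage.fileshare.aio (or a retained
# azure.multiapi.storagev2.fileshare version); connection string and names are
# placeholders.
import asyncio

from azure.storage.fileshare.aio import ShareClient  # assumed equivalent surface


async def demo_share(conn_str: str) -> None:
    share = ShareClient.from_connection_string(conn_str, share_name="demo-share")  # placeholder
    try:
        await share.create_share(quota=1)          # quota in GiB
        await share.create_directory("reports")    # returns a ShareDirectoryClient

        # Items are dict-like, keyed by 'name' (and 'is_directory' per the
        # paging model documented above).
        async for item in share.list_directories_and_files():
            print(item["name"])

        await share.delete_share(delete_snapshots=True)
    finally:
        await share.close()


if __name__ == "__main__":
    asyncio.run(demo_share("<connection-string>"))  # placeholder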
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import process_storage_error -from .._shared.policies_async import ExponentialRetry -from .._generated.aio import AzureFileStorage -from .._generated.models import StorageServiceProperties -from .._share_service_client import ShareServiceClient as ShareServiceClientBase -from .._serialize import get_api_version -from ._share_client_async import ShareClient -from ._models import SharePropertiesPaged -from .._models import service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import ResourceTypes, AccountSasPermissions - from .._models import ( - ShareProperties, - Metrics, - CorsRule, - ShareProtocolSettings, - ) - - -class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. - For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication_async.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareServiceClient, self).__init__( - account_url, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - default_api_version = self._client._config.version # pylint: disable=protected-access - self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 12 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - protocol=None, # type: Optional[ShareProtocolSettings], - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :param protocol_settings: - Sets protocol settings - :type protocol: ~azure.storage.fileshare.ShareProtocolSettings - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. 
- """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - protocol=protocol - ) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs # type: Any - ): # type: (...) -> AsyncItemPaged - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 16 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace_async - async def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 12 - :caption: Create a share in the file share service. 
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace_async - async def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 16 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace_async - async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - """ - share = self.get_share_client(deleted_share_name) - try: - await share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except HttpResponseError as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. 
- """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/py.typed b/azure/multiapi/storagev2/fileshare/v2020_04_08/py.typed deleted file mode 100644 index e69de29..0000000 diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/__init__.py deleted file mode 100644 index af67e01..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/__init__.py +++ /dev/null @@ -1,82 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._version import VERSION -from ._file_client import ShareFileClient -from ._directory_client import ShareDirectoryClient -from ._share_client import ShareClient -from ._share_service_client import ShareServiceClient -from ._lease import ShareLeaseClient -from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import ( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode) -from ._models import ( - ShareProperties, - DirectoryProperties, - Handle, - FileProperties, - Metrics, - RetentionPolicy, - CorsRule, - ShareSmbSettings, - SmbMultichannel, - ShareProtocolSettings, - ShareProtocols, - AccessPolicy, - FileSasPermissions, - ShareSasPermissions, - ContentSettings, - NTFSAttributes) -from ._generated.models import ( - HandleItem, - ShareAccessTier -) -from ._generated.models import ( - ShareRootSquash -) - -__version__ = VERSION - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageErrorCode', - 'Metrics', - 'RetentionPolicy', - 'CorsRule', - 'ShareSmbSettings', - 'ShareAccessTier', - 'SmbMultichannel', - 'ShareProtocolSettings', - 'AccessPolicy', - 'FileSasPermissions', - 'ShareSasPermissions', - 'ShareProtocols', - 'ShareProperties', - 'DirectoryProperties', - 'FileProperties', - 'ContentSettings', - 'Handle', - 'NTFSAttributes', - 'HandleItem', - 'ShareRootSquash', - 'generate_account_sas', - 'generate_share_sas', - 'generate_file_sas' -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_deserialize.py deleted file mode 100644 index 6839469..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_deserialize.py +++ /dev/null @@ -1,83 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import ShareProperties, DirectoryProperties, FileProperties -from ._shared.response_handlers import deserialize_metadata -from ._generated.models import ShareFileRangeList - - -def deserialize_share_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - share_properties = ShareProperties( - metadata=metadata, - **headers - ) - return share_properties - - -def deserialize_directory_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - directory_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return directory_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_file_stream(response, obj, headers): - file_properties = deserialize_file_properties(response, obj, headers) - obj.properties = file_properties - return response.http_response.location_mode, obj - - -def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission - ''' - - return obj.permission - - -def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission key - ''' - - if response is None or headers is None: - return None - return headers.get('x-ms-file-permission-key', None) - - -def get_file_ranges_result(ranges): - # type: (ShareFileRangeList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - file_ranges = [] # type: ignore - clear_ranges = [] # type: List - if ranges.ranges: - file_ranges = [ - {'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] # type: ignore - if ranges.clear_ranges: - clear_ranges = [ - {'start': clear_range.start, 'end': clear_range.end} for clear_range in ranges.clear_ranges] - return file_ranges, clear_ranges diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_directory_client.py deleted file mode 100644 index 1c9d5f1..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_directory_client.py +++ /dev/null @@ -1,726 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._deserialize import deserialize_directory_properties -from ._serialize import get_api_version -from ._file_client import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, DirectoryProperties, ContentSettings - from ._generated.models import HandleItem - - -class ShareDirectoryClient(StorageAccountHostsMixin): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.directory_path = directory_path - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @classmethod - def from_directory_url(cls, directory_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> ShareDirectoryClient - """Create a ShareDirectoryClient from a directory url. - - :param str directory_url: - The full URI to the directory. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - try: - if not directory_url.lower().startswith('http'): - directory_url = "https://" + directory_url - except AttributeError: - raise ValueError("Directory URL must be a string.") - parsed_url = urlparse(directory_url.rstrip('/')) - if not parsed_url.path and not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(directory_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - path_snapshot, _ = parse_query(parsed_url.query) - - share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') - share_name = unquote(share_name) - - directory_path = path_dir - snapshot = snapshot or path_snapshot - - return cls( - account_url=account_url, share_name=share_name, directory_path=directory_path, - credential=credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. 
- """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - directory_path = "" - if self.directory_path: - directory_path = "/" + quote(self.directory_path, safe='~') - return "{}://{}/{}{}{}".format( - self.scheme, - hostname, - quote(share_name), - directory_path, - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - directory_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareDirectoryClient - """Create ShareDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str directory_path: - The directory path. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs) - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, napshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 12 - :caption: Gets the subdirectory client. 
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode, **kwargs) - - @distributed_trace - def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 12 - :caption: Creates a directory. - """ - timeout = kwargs.pop('timeout', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 12 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - self._client.directory.delete(timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], **Any) -> ItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword list[str] include: - Include this parameter to specify one or more datasets to include in the response. - Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword bool include_extended_info: - If this is set to true, file id will be returned in listed results. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_directory.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 12 - :caption: List directories and files. - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> ItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. 
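The listing and handle-closing operations described above fit together naturally; a short sketch, with the result keys taken from the return-value documentation and the names again placeholders::

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareDirectoryClient

    dir_client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="reports", directory_path="2023")
    for item in dir_client.list_directories_and_files(name_starts_with="summary"):
        print(item.name)
    counts = dir_client.close_all_handles(recursive=True)
    print(counts["closed_handles_count"], counts["failed_handles_count"])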
- :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace - def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace - def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. 
- :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_subdirectory( - self, directory_name, # type: str - **kwargs): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 12 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace - def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 12 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace - def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. 
- :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 12 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace - def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 12 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_download.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_download.py deleted file mode 100644 index a2db5aa..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_download.py +++ /dev/null @@ -1,554 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
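A quick sketch of the ``upload_file``/``delete_file`` convenience methods removed just above (they delegate to a ``ShareFileClient`` under the hood); the local file name and remote names are placeholders::

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareDirectoryClient

    dir_client = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="reports", directory_path="2023")
    with open("summary.csv", "rb") as data:
        file_client = dir_client.upload_file("summary.csv", data)  # returns a ShareFileClient
    dir_client.delete_file("summary.csv")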
-# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO -from typing import Iterator - -from azure.core.exceptions import HttpResponseError, ResourceModifiedError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - etag=None, - **kwargs - ): - self.client = client - self.etag = etag - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield 
index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], download_range[1], check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - if response.properties.etag != self.etag: - raise ResourceModifiedError(message="The file has been modified while downloading.") - - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _ChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += self._iter_downloader.yield_chunk(chunk) - except StopIteration as e: - self._complete = True - if self._current_content: - return self._current_content - raise e - - return self._get_chunk_data() - - next = __next__ # Python 2 compatibility. - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. 
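The 16-byte alignment performed by ``process_range_and_offset`` earlier in this file is easiest to follow with concrete numbers; a sketch of the arithmetic for a hypothetical request for bytes 100-199 of a client-side-encrypted file::

    start_offset = 100 % 16           # 4  -> aligned start would be byte 96
    start_range = 100 - start_offset  # 96
    start_offset += 16                # 20 -> pull one extra block for the IV, since 96 > 0
    start_range -= 16                 # 80
    end_offset = 15 - (199 % 16)      # 8  -> aligned end is byte 207
    end_range = 199 + end_offset      # 207
    # the service is asked for bytes 80-207; after decryption the extra leading and
    # trailing bytes (tracked by the 20/8 offsets) are trimmed so the caller sees 100-199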
- - :ivar str name: - The name of the file being downloaded. - :ivar: str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. - :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - self._etag = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, 
- download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size == self.size: - self._download_complete = True - self._etag = response.properties.etag - return response - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - etag=self._etag, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. 
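The ``StorageStreamDownloader`` above is the object that ``ShareFileClient.download_file()`` hands back; a minimal sketch of the three consumption patterns (``readall``, ``chunks`` and ``readinto``), with placeholder names and a retained-version import assumed::

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareFileClient

    file_client = ShareFileClient.from_connection_string(
        "<connection-string>", share_name="reports", file_path="2023/summary.csv")

    data = file_client.download_file().readall()                       # whole file as bytes
    size = sum(len(c) for c in file_client.download_file().chunks())   # chunk by chunk
    with open("summary_copy.csv", "wb") as fh:                         # into a local stream
        file_client.download_file(max_concurrency=4).readinto(fh)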
- - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - etag=self._etag, - **self._request_options - ) - if parallel: - import concurrent.futures - with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor: - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_file_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_file_client.py deleted file mode 100644 index 5f8f979..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_file_client.py +++ /dev/null @@ -1,1411 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, too-many-public-methods -import functools -import time -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Optional, Union, IO, List, Dict, Any, Iterable, Tuple, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.models import FileHTTPHeaders -from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, get_length -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._lease import ShareLeaseClient -from ._serialize import get_source_conditions, get_access_conditions, get_smb_properties, get_api_version -from ._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result -from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import -from ._download import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, ContentSettings, FileProperties, Handle - from ._generated.models import HandleItem - - -def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = client.create_file( - size, - content_settings=content_settings, - metadata=metadata, - timeout=timeout, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - **kwargs - ) - if size == 0: - return response - - responses = upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except HttpResponseError as error: - process_storage_error(error) - - -class ShareFileClient(StorageAccountHostsMixin): - """A client to interact with a specific file, although that file may not yet exist. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. 
If specified, this value will override - a file value specified in the file URL. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not (share_name and file_path): - raise ValueError("Please specify a share name and file name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.file_path = file_path.split('/') - self.file_name = self.file_path[-1] - self.directory_path = "/".join(self.file_path[:-1]) - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @classmethod - def from_file_url( - cls, file_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """A client to interact with a specific file, although that file may not yet exist. - - :param str file_url: The full URI to the file. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. 
- :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - try: - if not file_url.lower().startswith('http'): - file_url = "https://" + file_url - except AttributeError: - raise ValueError("File URL must be a string.") - parsed_url = urlparse(file_url.rstrip('/')) - - if not (parsed_url.netloc and parsed_url.path): - raise ValueError("Invalid URL: {}".format(file_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - - path_share, _, path_file = parsed_url.path.lstrip('/').partition('/') - path_snapshot, _ = parse_query(parsed_url.query) - snapshot = snapshot or path_snapshot - share_name = unquote(path_share) - file_path = '/'.join([unquote(p) for p in path_file.split('/')]) - return cls(account_url, share_name, file_path, snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - "/".join([quote(p, safe='~') for p in self.file_path]), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Create ShareFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str file_path: - The file path. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START create_file_client] - :end-before: [END create_file_client] - :language: python - :dedent: 12 - :caption: Creates the file client with connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs) - - @distributed_trace - def acquire_lease(self, lease_id=None, **kwargs): - # type: (Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the file does not have an active lease, the File - Service creates a lease on the blob and returns a new lease. - - :param str lease_id: - Proposed lease ID, in a GUID string format. The File Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client.py - :start-after: [START acquire_and_release_lease_on_file] - :end-before: [END acquire_and_release_lease_on_file] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a file. - """ - kwargs['lease_duration'] = -1 - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_file( # type: ignore - self, size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 12 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. 
- :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 12 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs) - - @distributed_trace - def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. 
- Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 12 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - try: - self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def download_file( - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the file into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the file has an active lease. 
Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.fileshare.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 12 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - return StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs) - - @distributed_trace - def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 12 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = '/'.join(self.file_path) - return file_props # type: ignore - - @distributed_trace - def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop('size', None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_file_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.file.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - metadata=metadata, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_range( # type: ignore - self, data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. 
Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _upload_range_from_url_options(source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - - if offset is None: - raise ValueError("offset must be provided.") - if length is None: - raise ValueError("length must be provided.") - if source_offset is None: - raise ValueError("source_offset must be provided.") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) - source_authorization = kwargs.pop('source_authorization', None) - source_mod_conditions = get_source_conditions(kwargs) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'copy_source_authorization': source_authorization, - 'copy_source': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_modified_access_conditions': source_mod_conditions, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
- Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.file.upload_range_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _get_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_sharesnapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) 
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - content_range = None - if offset is not None: - if length is not None: - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - else: - content_range = 'bytes={0}-'.format(offset) - options = { - 'sharesnapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range} - if previous_sharesnapshot: - try: - options['prevsharesnapshot'] = previous_sharesnapshot.snapshot # type: ignore - except AttributeError: - try: - options['prevsharesnapshot'] = previous_sharesnapshot['snapshot'] # type: ignore - except TypeError: - options['prevsharesnapshot'] = previous_sharesnapshot - options.update(kwargs) - return options - - @distributed_trace - def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A list of valid ranges. - :rtype: List[dict[str, int]] - """ - options = self._get_ranges_options( - offset=offset, - length=length, - **kwargs) - try: - ranges = self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] - - def get_ranges_diff( # type: ignore - self, - previous_sharesnapshot, # type: Union[str, Dict[str, Any]] - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - .. versionadded:: 12.6.0 - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :param str previous_sharesnapshot: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous file snapshot to be compared - against a more recent snapshot or the current file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. - The first element are filled file ranges, the 2nd element is cleared file ranges. 
- :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_ranges_options( - offset=offset, - length=length, - previous_sharesnapshot=previous_sharesnapshot, - **kwargs) - try: - ranges = self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_file_ranges_result(ranges) - - @distributed_trace - def clear_range( # type: ignore - self, offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - try: - return self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - optionalbody=None, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> ItemPaged[Handle] - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/__init__.py deleted file mode 100644 index 34ce526..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_azure_file_storage.py deleted file mode 100644 index 6275ae4..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_azure_file_storage.py +++ /dev/null @@ -1,96 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from ._configuration import AzureFileStorageConfiguration -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from . import models - - -class AzureFileStorage(object): - """AzureFileStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.fileshare.operations.ServiceOperations - :ivar share: ShareOperations operations - :vartype share: azure.storage.fileshare.operations.ShareOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.fileshare.operations.DirectoryOperations - :ivar file: FileOperations operations - :vartype file: azure.storage.fileshare.operations.FileOperations - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - base_url = '{url}' - self._config = AzureFileStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, http_request, **kwargs): - # type: (HttpRequest, Any) -> HttpResponse - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. 
- :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.pipeline.transport.HttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureFileStorage - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_configuration.py deleted file mode 100644 index 7d76099..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_configuration.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-10-02" - self.file_range_write_from_url = "update" - kwargs.setdefault('sdk_moniker', 'azurefilestorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/__init__.py deleted file mode 100644 index f306ba0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_azure_file_storage.py deleted file mode 100644 index 7453a46..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_azure_file_storage.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest -from msrest import Deserializer, Serializer - -from ._configuration import AzureFileStorageConfiguration -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from .. import models - - -class AzureFileStorage(object): - """AzureFileStorage. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.fileshare.aio.operations.ServiceOperations - :ivar share: ShareOperations operations - :vartype share: azure.storage.fileshare.aio.operations.ShareOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.fileshare.aio.operations.DirectoryOperations - :ivar file: FileOperations operations - :vartype file: azure.storage.fileshare.aio.operations.FileOperations - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureFileStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureFileStorage": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_configuration.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_configuration.py deleted file mode 100644 index 4ec5174..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_configuration.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2020-10-02" - self.file_range_write_from_url = "update" - kwargs.setdefault('sdk_moniker', 'azurefilestorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/__init__.py deleted file mode 100644 index ba8fb22..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_directory_operations.py deleted file mode 100644 index 2d3fa63..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_directory_operations.py +++ /dev/null @@ -1,750 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - **kwargs: Any - ) -> None: - """Creates a new directory under the specified share or parent directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. 
- :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def get_properties( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Returns all system properties for the specified directory, and can also be used to check the - existence of a directory. The data returned does not include the files in the directory or any - subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Removes the specified empty directory. Note that the directory must be empty before it can be - deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def set_properties( - self, - timeout: Optional[int] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - **kwargs: Any - ) -> None: - """Sets properties on the directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. 
- :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - **kwargs: Any - ) -> None: - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - 
if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def list_files_and_directories_segment( - self, - prefix: Optional[str] = None, - sharesnapshot: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListFilesIncludeType"]]] = None, - include_extended_info: Optional[bool] = None, - **kwargs: Any - ) -> "_models.ListFilesAndDirectoriesSegmentResponse": - """Returns a list of files or directories under the specified share or directory. It lists the - contents only for a single level of the directory hierarchy. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param include: Include this parameter to specify one or more datasets to include in the - response. 
- :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType] - :param include_extended_info: - :type include_extended_info: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListFilesAndDirectoriesSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListFilesAndDirectoriesSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if include_extended_info is not None: - header_parameters['x-ms-file-extended-info'] = self._serialize.header("include_extended_info", include_extended_info, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - 
list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def list_handles( - self, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - recursive: Optional[bool] = None, - **kwargs: Any - ) -> "_models.ListHandlesResponse": - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. - :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - 
if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def force_close_handles( - self, - handle_id: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - sharesnapshot: Optional[str] = None, - recursive: Optional[bool] = None, - **kwargs: Any - ) -> None: - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. 
- :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_file_operations.py deleted file mode 100644 index eb8bddb..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_file_operations.py +++ /dev/null @@ -1,1776 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) 
Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class FileOperations: - """FileOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - file_content_length: int, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - file_http_headers: Optional["_models.FileHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Creates a new file or replaces a file. Note it only initializes the file with no content. - - :param file_content_length: Specifies the maximum size for the file, up to 4 TB. - :type file_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. 
Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - file_type_constant = "file" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("file_type_constant", file_type_constant, 'str') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = 
self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def download( - self, - timeout: Optional[int] = None, - range: Optional[str] = None, - range_get_content_md5: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> IO: - """Reads or downloads a file from the system, including its metadata and properties. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and specified together with the - Range header, the service returns the MD5 hash for the range, as long as the range is less than - or equal to 4 MB in size. - :type range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - 
response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - 
response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def get_properties( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Returns all user-defined metadata, standard HTTP properties, and system properties for the - file. It does not return the content of the file. 
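The download operation above streams the file body and surfaces the copy, lease and SMB metadata as response headers; the range / x-ms-range-get-content-md5 pair asks the service for a per-range MD5 as long as the range is at most 4 MB. A hedged sketch of the equivalent call through the public client, with placeholder endpoint and credential (keyword names may differ slightly between SDK versions):

.. code-block:: python

    # Sketch only: offset/length populate the x-ms-range header, and
    # validate_content requests the transactional MD5 for small ranges.
    from azure.storage.fileshare import ShareFileClient

    file_client = ShareFileClient(
        account_url="https://<account>.file.core.windows.net",
        share_name="myshare",
        file_path="dir/hello.txt",
        credential="<account-key-or-sas>",
    )

    downloader = file_client.download_file(offset=0, length=512, validate_content=True)
    data = downloader.readall()        # bytes of the requested range
    props = downloader.properties      # built from the response headers listed above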
- - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-type']=self._deserialize('str', response.headers.get('x-ms-type')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - 
response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
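get_properties above issues a HEAD request and returns everything through response headers, while delete maps to a single DELETE that succeeds with 202. A rough sketch of both through the public client, again with placeholder account details:

.. code-block:: python

    # Sketch only: get_file_properties()/delete_file() drive the HEAD and DELETE
    # requests assembled by the generated get_properties()/delete() above.
    from azure.storage.fileshare import ShareFileClient

    file_client = ShareFileClient(
        "https://<account>.file.core.windows.net", "myshare", "dir/hello.txt",
        credential="<account-key-or-sas>",
    )

    props = file_client.get_file_properties()
    print(props.size, props.etag, props.last_modified)

    file_client.delete_file()   # returns None; the 202 response is surfaced as success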
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def set_http_headers( - self, - timeout: Optional[int] = None, - file_content_length: Optional[int] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - file_http_headers: Optional["_models.FileHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Sets HTTP headers on the file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If the specified byte value - is less than the current size of the file, then all ranges above the specified byte value are - cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. 
This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - 
header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, 
response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) 
- response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. 
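The four lease actions (acquire, release, change, break) all go through the same comp=lease PUT, distinguished only by the x-ms-lease-action header, and mutating calls such as set_metadata or set_http_headers must then present the lease ID in x-ms-lease-id. A hedged sketch with the public client, assuming placeholder identifiers and an SDK version with file-lease support:

.. code-block:: python

    # Sketch only: acquire_lease() returns a ShareLeaseClient bound to this file;
    # passing it to mutating calls fills in the x-ms-lease-id header.
    import uuid
    from azure.storage.fileshare import ShareFileClient

    file_client = ShareFileClient(
        "https://<account>.file.core.windows.net", "myshare", "dir/hello.txt",
        credential="<account-key-or-sas>",
    )

    lease = file_client.acquire_lease()                 # file leases are infinite
    file_client.set_file_metadata({"owner": "team-a"}, lease=lease)

    lease.change(proposed_lease_id=str(uuid.uuid4()))   # swap to a new lease ID
    lease.release()                                     # or lease.break_lease()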
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - 
- break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def upload_range( - self, - range: str, - content_length: int, - timeout: Optional[int] = None, - file_range_write: Union[str, "_models.FileRangeWriteType"] = "update", - content_md5: Optional[bytearray] = None, - optionalbody: Optional[IO] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the start and end of the range - must be specified. For an update operation, the range can be up to 4 MB in size. For a clear - operation, the range can be up to the value of the file's full size. The File service accepts - only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be - specified in the following format: bytes=startByte-endByte. - :type range: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_range_write: Specify one of the following options: - Update: Writes the bytes - specified by the request body into the specified range. The Range and Content-Length headers - must match to perform the update. - Clear: Clears the specified range and releases the space - used in storage for that range. To clear a range, set the Content-Length header to zero, and - set the Range header to a value that indicates the range to clear, up to maximum file size. - :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType - :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of - the data during transport. When the Content-MD5 header is specified, the File service compares - the hash of the content that has arrived with the header value that was sent. If the two hashes - do not match, the operation will fail with error code 400 (Bad Request). - :type content_md5: bytearray - :param optionalbody: Initial data. - :type optionalbody: IO - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "range" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_range.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = optionalbody - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, 
None, response_headers) - - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def upload_range_from_url( - self, - range: str, - copy_source: str, - content_length: int, - timeout: Optional[int] = None, - source_range: Optional[str] = None, - source_content_crc64: Optional[bytearray] = None, - copy_source_authorization: Optional[str] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Upload a range of bytes to a file where the contents are read from a URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_content_crc64: bytearray - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
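upload_range above is the raw write path: x-ms-write chooses between update and clear, Content-Length must match the range for an update and be zero for a clear, and Content-MD5 optionally guards the payload in transit. A hedged sketch of the corresponding public-client calls (placeholder account details):

.. code-block:: python

    # Sketch only: upload_range() issues the comp=range PUT with x-ms-write=update;
    # clear_range() is the x-ms-write=clear form with an empty body.
    from azure.storage.fileshare import ShareFileClient

    file_client = ShareFileClient(
        "https://<account>.file.core.windows.net", "myshare", "dir/hello.txt",
        credential="<account-key-or-sas>",
    )

    payload = b"x" * 512
    file_client.upload_range(payload, offset=0, length=len(payload))

    # Release the space used by that range again (the "clear" branch of x-ms-write).
    file_client.clear_range(offset=0, length=512)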
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_match_crc64 = None - _source_if_none_match_crc64 = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - comp = "range" - accept = "application/xml" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - if _source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", _source_if_match_crc64, 'bytearray') - if _source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", _source_if_none_match_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def get_range_list( - self, - sharesnapshot: Optional[str] = None, - prevsharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> "_models.ShareFileRangeList": - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, - when present, specifies the previous snapshot. - :type prevsharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, inclusively. - :type range: str - :param lease_access_conditions: Parameter group. 
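upload_range_from_url above performs the write server-side: the destination range, the source URL (optionally authorized with a bearer token via x-ms-copy-source-authorization), and a source range or CRC64 precondition. Roughly, through the public client, where the source URL below is a placeholder SAS-authenticated file and keyword names may vary by SDK version:

.. code-block:: python

    # Sketch only: source_offset/offset/length map to x-ms-source-range and
    # x-ms-range; the source must be readable by the service (e.g. via SAS).
    from azure.storage.fileshare import ShareFileClient

    dest = ShareFileClient(
        "https://<account>.file.core.windows.net", "myshare", "dir/copy-target.txt",
        credential="<account-key-or-sas>",
    )

    source_url = "https://<account>.file.core.windows.net/myshare/dir/hello.txt?<sas>"
    dest.upload_range_from_url(source_url, offset=0, length=512, source_offset=0)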
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareFileRangeList, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareFileRangeList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareFileRangeList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "rangelist" - accept = "application/xml" - - # Construct URL - url = self.get_range_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if prevsharesnapshot is not None: - query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-content-length']=self._deserialize('long', response.headers.get('x-ms-content-length')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareFileRangeList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def start_copy( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: 
Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - copy_file_smb_info: Optional["_models.CopyFileSmbInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param copy_file_smb_info: Parameter group. - :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_permission_copy_mode = None - _ignore_read_only = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - _set_archive_attribute = None - _lease_id = None - if copy_file_smb_info is not None: - _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - _ignore_read_only = copy_file_smb_info.ignore_read_only - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - _set_archive_attribute = copy_file_smb_info.set_archive_attribute - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.start_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if _file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", _file_permission_copy_mode, 'str') - if _ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", _ignore_read_only, 'bool') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if _set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", _set_archive_attribute, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def abort_copy( - self, - copy_id: str, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Aborts a pending Copy File operation, and leaves a destination file with zero length and full - metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def list_handles( - self, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListHandlesResponse": - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def force_close_handles( - self, - handle_id: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - sharesnapshot: Optional[str] = None, - **kwargs: Any - ) -> None: - 
"""Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - 
response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_service_operations.py deleted file mode 100644 index f413f29..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,269 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def set_properties( - self, - storage_service_properties: "_models.StorageServiceProperties", - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Sets properties for a storage account's File service endpoint, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - **kwargs: Any - ) -> "_models.StorageServiceProperties": - """Gets the properties of a storage account's File service, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - async def list_shares_segment( - self, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListSharesIncludeType"]]] = None, - timeout: Optional[int] = None, - **kwargs: Any - ) -> "_models.ListSharesResponse": - """The List Shares Segment operation returns a list of the shares and share snapshots under the - specified account. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListSharesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListSharesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_shares_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListSharesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_shares_segment.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_share_operations.py deleted file mode 100644 index ca08ada..0000000 --- 
a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_share_operations.py +++ /dev/null @@ -1,1485 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ShareOperations: - """ShareOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - quota: Optional[int] = None, - access_tier: Optional[Union[str, "_models.ShareAccessTier"]] = None, - enabled_protocols: Optional[str] = None, - root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, - **kwargs: Any - ) -> None: - """Creates a new share under the specified account. If the share with the same name already - exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param enabled_protocols: Protocols to enable on the share. - :type enabled_protocols: str - :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
- :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if enabled_protocols is not None: - header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_properties( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Returns all user-defined metadata and system properties for the specified share or share - snapshot. The data returned does not include the share's list of files. 
- - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota')) - response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')) - response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')) 
- response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')) - response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')) - response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols')) - response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}'} # type: ignore - - async def delete( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Operation marks the specified share or share snapshot for deletion. The share or share snapshot - and any files contained within it are later deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the base share and all of its - snapshots. - :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. 
A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', 
response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: 
Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "renew" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - sharesnapshot: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return 
cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def create_snapshot( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - **kwargs: Any - ) -> None: - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{shareName}'} # type: ignore - - async def create_permission( - self, - share_permission: "_models.SharePermission", - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the share level. 
- :type share_permission: ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/xml" - - # Construct URL - url = self.create_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(share_permission, 'SharePermission') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_permission.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_permission( - self, - file_permission_key: str, - timeout: Optional[int] = None, - **kwargs: Any - ) -> "_models.SharePermission": - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SharePermission, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - accept = "application/json" - - # Construct URL - url = self.get_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('SharePermission', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_properties( - self, - timeout: Optional[int] = None, - quota: Optional[int] = None, - access_tier: Optional[Union[str, "_models.ShareAccessTier"]] = None, - root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Sets properties for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
- :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Sets one or more user-defined 
name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_access_policy( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> List["_models.SignedIdentifier"]: - """Returns information about stored access policies specified on the 
share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_access_policy( - self, - timeout: Optional[int] = None, - share_acl: Optional[List["_models.SignedIdentifier"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Sets a stored access policy for use with shared access signatures. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param share_acl: The ACL for the share. - :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - 
return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_statistics( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> "_models.ShareStats": - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareStats, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} # type: ignore - - async def 
restore( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - deleted_share_name: Optional[str] = None, - deleted_share_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_share_name: Specifies the name of the previously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the previously-deleted share. - :type deleted_share_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{shareName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/__init__.py deleted file mode 100644 index 27d6752..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/__init__.py +++ /dev/null @@ -1,127 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import ClearRange - from ._models_py3 import CopyFileSmbInfo - from ._models_py3 import CorsRule - from ._models_py3 import DirectoryItem - from ._models_py3 import FileHTTPHeaders - from ._models_py3 import FileItem - from ._models_py3 import FileProperty - from ._models_py3 import FileRange - from ._models_py3 import FilesAndDirectoriesListSegment - from ._models_py3 import HandleItem - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListFilesAndDirectoriesSegmentResponse - from ._models_py3 import ListHandlesResponse - from ._models_py3 import ListSharesResponse - from ._models_py3 import Metrics - from ._models_py3 import RetentionPolicy - from ._models_py3 import ShareFileRangeList - from ._models_py3 import ShareItemInternal - from ._models_py3 import SharePermission - from ._models_py3 import SharePropertiesInternal - from ._models_py3 import ShareProtocolSettings - from ._models_py3 import ShareSmbSettings - from ._models_py3 import ShareStats - from ._models_py3 import SignedIdentifier - from ._models_py3 import SmbMultichannel - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError - from ._models_py3 import StorageServiceProperties -except (SyntaxError, ImportError): - from ._models import AccessPolicy # type: ignore - from ._models import ClearRange # type: ignore - from ._models import CopyFileSmbInfo # type: ignore - from ._models import CorsRule # type: ignore - from ._models import DirectoryItem # type: ignore - from ._models import FileHTTPHeaders # type: ignore - from ._models import FileItem # type: ignore - from ._models import FileProperty # type: ignore - from ._models import FileRange # type: ignore - from ._models import FilesAndDirectoriesListSegment # type: ignore - from ._models import HandleItem # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListFilesAndDirectoriesSegmentResponse # type: ignore - from ._models import ListHandlesResponse # type: ignore - from ._models import ListSharesResponse # type: ignore - from ._models import Metrics # type: ignore - from ._models import RetentionPolicy # type: ignore - from ._models import ShareFileRangeList # type: ignore - 
from ._models import ShareItemInternal # type: ignore - from ._models import SharePermission # type: ignore - from ._models import SharePropertiesInternal # type: ignore - from ._models import ShareProtocolSettings # type: ignore - from ._models import ShareSmbSettings # type: ignore - from ._models import ShareStats # type: ignore - from ._models import SignedIdentifier # type: ignore - from ._models import SmbMultichannel # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageServiceProperties # type: ignore - -from ._azure_file_storage_enums import ( - CopyStatusType, - DeleteSnapshotsOptionType, - FileRangeWriteType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListFilesIncludeType, - ListSharesIncludeType, - PermissionCopyModeType, - ShareAccessTier, - ShareRootSquash, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'ClearRange', - 'CopyFileSmbInfo', - 'CorsRule', - 'DirectoryItem', - 'FileHTTPHeaders', - 'FileItem', - 'FileProperty', - 'FileRange', - 'FilesAndDirectoriesListSegment', - 'HandleItem', - 'LeaseAccessConditions', - 'ListFilesAndDirectoriesSegmentResponse', - 'ListHandlesResponse', - 'ListSharesResponse', - 'Metrics', - 'RetentionPolicy', - 'ShareFileRangeList', - 'ShareItemInternal', - 'SharePermission', - 'SharePropertiesInternal', - 'ShareProtocolSettings', - 'ShareSmbSettings', - 'ShareStats', - 'SignedIdentifier', - 'SmbMultichannel', - 'SourceModifiedAccessConditions', - 'StorageError', - 'StorageServiceProperties', - 'CopyStatusType', - 'DeleteSnapshotsOptionType', - 'FileRangeWriteType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'ListFilesIncludeType', - 'ListSharesIncludeType', - 'PermissionCopyModeType', - 'ShareAccessTier', - 'ShareRootSquash', - 'StorageErrorCode', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_azure_file_storage_enums.py deleted file mode 100644 index 1c8b351..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_azure_file_storage_enums.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. 
- """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - PENDING = "pending" - SUCCESS = "success" - ABORTED = "aborted" - FAILED = "failed" - -class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INCLUDE = "include" - INCLUDE_LEASED = "include-leased" - -class FileRangeWriteType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - UPDATE = "update" - CLEAR = "clear" - -class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """When a share is leased, specifies whether the lease is of infinite or fixed duration. - """ - - INFINITE = "infinite" - FIXED = "fixed" - -class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Lease state of the share. - """ - - AVAILABLE = "available" - LEASED = "leased" - EXPIRED = "expired" - BREAKING = "breaking" - BROKEN = "broken" - -class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The current lease status of the share. - """ - - LOCKED = "locked" - UNLOCKED = "unlocked" - -class ListFilesIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - TIMESTAMPS = "Timestamps" - ETAG = "Etag" - ATTRIBUTES = "Attributes" - PERMISSION_KEY = "PermissionKey" - -class ListSharesIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SNAPSHOTS = "snapshots" - METADATA = "metadata" - DELETED = "deleted" - -class PermissionCopyModeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SOURCE = "source" - OVERRIDE = "override" - -class ShareAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - TRANSACTION_OPTIMIZED = "TransactionOptimized" - HOT = "Hot" - COOL = "Cool" - -class ShareRootSquash(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NO_ROOT_SQUASH = "NoRootSquash" - ROOT_SQUASH = "RootSquash" - ALL_SQUASH = "AllSquash" - -class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Error codes returned by the service - """ - - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = "InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = 
"OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" - CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" - DELETE_PENDING = "DeletePending" - DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" - FILE_LOCK_CONFLICT = "FileLockConflict" - INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" - PARENT_NOT_FOUND = "ParentNotFound" - READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" - SHARE_ALREADY_EXISTS = "ShareAlreadyExists" - SHARE_BEING_DELETED = "ShareBeingDeleted" - SHARE_DISABLED = "ShareDisabled" - SHARE_NOT_FOUND = "ShareNotFound" - SHARING_VIOLATION = "SharingViolation" - SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" - SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" - SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" - SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" - CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" - AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" - AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" - AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" - AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" - AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models.py deleted file mode 100644 index 024a56b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models.py +++ /dev/null @@ -1,1115 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. 
- :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class CopyFileSmbInfo(msrest.serialization.Model): - """Parameter group. - - :param file_permission_copy_mode: Specifies the option to copy file security descriptor from - source file or to set it using the value which is defined by the header value of - x-ms-file-permission or x-ms-file-permission-key. Possible values include: "source", - "override". - :type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file if it already exists - and has read-only attribute set. - :type ignore_read_only: bool - :param file_attributes: Specifies either the option to copy file attributes from a source - file(source) to a target file or a list of attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file creation time from a source - file(source) to a target file or a time value in ISO 8601 format to set as creation time on a - target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last write time from a - source file(source) to a target file or a time value in ISO 8601 format to set as last write - time on a target file. - :type file_last_write_time: str - :param set_archive_attribute: Specifies the option to set archive attribute on a target file. - True means archive attribute will be set on a target file despite attribute overrides or a - source file state. 
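A hedged sketch of the CopyFileSmbInfo parameter group described above, copying the SMB security descriptor and timestamps from the source file; the values are illustrative and the import assumes a pre-1.0.0 release::

    from azure.multiapi.storagev2.fileshare.v2020_10_02._generated.models import CopyFileSmbInfo

    smb_info = CopyFileSmbInfo(
        file_permission_copy_mode="source",   # copy the security descriptor from the source file
        ignore_read_only=True,                # overwrite the target even if its read-only attribute is set
        file_attributes="source",             # copy attributes rather than setting an explicit list
        file_creation_time="source",
        file_last_write_time="source",
        set_archive_attribute=False,
    )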
- :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'}, - 'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'}, - 'file_attributes': {'key': 'fileAttributes', 'type': 'str'}, - 'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'}, - 'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'}, - 'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None) - self.ignore_read_only = kwargs.get('ignore_read_only', None) - self.file_attributes = kwargs.get('file_attributes', None) - self.file_creation_time = kwargs.get('file_creation_time', None) - self.file_last_write_time = kwargs.get('file_last_write_time', None) - self.set_archive_attribute = kwargs.get('set_archive_attribute', None) - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user age sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a browser should cache the - preflight OPTIONS request. 
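For illustration, a minimal CorsRule as the generated model defines it (all five fields are required strings); the origin and headers are example values, and the import assumes a pre-1.0.0 release::

    from azure.multiapi.storagev2.fileshare.v2020_10_02._generated.models import CorsRule

    # Let a single origin issue GET/PUT requests and cache the preflight response for an hour.
    cors_rule = CorsRule(
        allowed_origins="https://www.contoso.com",
        allowed_methods="GET,PUT",
        allowed_headers="x-ms-meta-*",
        exposed_headers="x-ms-meta-*",
        max_age_in_seconds=3600,
    )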
- :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs['allowed_origins'] - self.allowed_methods = kwargs['allowed_methods'] - self.allowed_headers = kwargs['allowed_headers'] - self.exposed_headers = kwargs['exposed_headers'] - self.max_age_in_seconds = kwargs['max_age_in_seconds'] - - -class DirectoryItem(msrest.serialization.Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param file_id: - :type file_id: str - :param properties: File properties. - :type properties: ~azure.storage.fileshare.models.FileProperty - :param attributes: - :type attributes: str - :param permission_key: - :type permission_key: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - 'attributes': {'key': 'Attributes', 'type': 'str'}, - 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__( - self, - **kwargs - ): - super(DirectoryItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.file_id = kwargs.get('file_id', None) - self.properties = kwargs.get('properties', None) - self.attributes = kwargs.get('attributes', None) - self.permission_key = kwargs.get('permission_key', None) - - -class FileHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param file_content_type: Sets the MIME content type of the file. The default type is - 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service stores this value - but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition header. 
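A small sketch of the FileHTTPHeaders parameter group above, stamping standard HTTP headers onto a file; the header values are illustrative and the import assumes a pre-1.0.0 release::

    from azure.multiapi.storagev2.fileshare.v2020_10_02._generated.models import FileHTTPHeaders

    headers = FileHTTPHeaders(
        file_content_type="application/json",
        file_cache_control="no-cache",
        file_content_disposition='attachment; filename="report.json"',
    )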
- :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': 'fileContentType', 'type': 'str'}, - 'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'}, - 'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'}, - 'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'}, - 'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'}, - 'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = kwargs.get('file_content_type', None) - self.file_content_encoding = kwargs.get('file_content_encoding', None) - self.file_content_language = kwargs.get('file_content_language', None) - self.file_cache_control = kwargs.get('file_cache_control', None) - self.file_content_md5 = kwargs.get('file_content_md5', None) - self.file_content_disposition = kwargs.get('file_content_disposition', None) - - -class FileItem(msrest.serialization.Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param file_id: - :type file_id: str - :param properties: Required. File properties. - :type properties: ~azure.storage.fileshare.models.FileProperty - :param attributes: - :type attributes: str - :param permission_key: - :type permission_key: str - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - 'attributes': {'key': 'Attributes', 'type': 'str'}, - 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, - } - _xml_map = { - 'name': 'File' - } - - def __init__( - self, - **kwargs - ): - super(FileItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.file_id = kwargs.get('file_id', None) - self.properties = kwargs['properties'] - self.attributes = kwargs.get('attributes', None) - self.permission_key = kwargs.get('permission_key', None) - - -class FileProperty(msrest.serialization.Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value may not be up-to-date - since an SMB client may have modified the file locally. The value of Content-Length may not - reflect that fact until the handle is closed or the op-lock is broken. To retrieve current - property values, call Get File Properties. 
- :type content_length: long - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_access_time: - :type last_access_time: ~datetime.datetime - :param last_write_time: - :type last_write_time: ~datetime.datetime - :param change_time: - :type change_time: ~datetime.datetime - :param last_modified: - :type last_modified: ~datetime.datetime - :param etag: - :type etag: str - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'creation_time': {'key': 'CreationTime', 'type': 'iso-8601'}, - 'last_access_time': {'key': 'LastAccessTime', 'type': 'iso-8601'}, - 'last_write_time': {'key': 'LastWriteTime', 'type': 'iso-8601'}, - 'change_time': {'key': 'ChangeTime', 'type': 'iso-8601'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(FileProperty, self).__init__(**kwargs) - self.content_length = kwargs['content_length'] - self.creation_time = kwargs.get('creation_time', None) - self.last_access_time = kwargs.get('last_access_time', None) - self.last_write_time = kwargs.get('last_write_time', None) - self.change_time = kwargs.get('change_time', None) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - - -class FileRange(msrest.serialization.Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long'}, - 'end': {'key': 'End', 'type': 'long'}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__( - self, - **kwargs - ): - super(FileRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class FilesAndDirectoriesListSegment(msrest.serialization.Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. - :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]'}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__( - self, - **kwargs - ): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = kwargs['directory_items'] - self.file_items = kwargs['file_items'] - - -class HandleItem(msrest.serialization.Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID. - :type handle_id: str - :param path: Required. File or directory name including full path starting from share root. - :type path: str - :param file_id: Required. FileId uniquely identifies the file or directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the object. 
- :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file handle was opened. - :type session_id: str - :param client_ip: Required. Client IP that opened the handle. - :type client_ip: str - :param open_time: Required. Time when the session that previously opened the handle has last - been reconnected. (UTC). - :type open_time: ~datetime.datetime - :param last_reconnect_time: Time handle was last connected to (UTC). - :type last_reconnect_time: ~datetime.datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str'}, - 'path': {'key': 'Path', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'parent_id': {'key': 'ParentId', 'type': 'str'}, - 'session_id': {'key': 'SessionId', 'type': 'str'}, - 'client_ip': {'key': 'ClientIp', 'type': 'str'}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__( - self, - **kwargs - ): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = kwargs['handle_id'] - self.path = kwargs['path'] - self.file_id = kwargs['file_id'] - self.parent_id = kwargs.get('parent_id', None) - self.session_id = kwargs['session_id'] - self.client_ip = kwargs['client_ip'] - self.open_time = kwargs['open_time'] - self.last_reconnect_time = kwargs.get('last_reconnect_time', None) - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. Abstract for entries that can be listed from Directory. - :type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. 
- :type next_marker: str - :param directory_id: - :type directory_id: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - 'directory_id': {'key': 'DirectoryId', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.share_name = kwargs['share_name'] - self.share_snapshot = kwargs.get('share_snapshot', None) - self.directory_path = kwargs['directory_path'] - self.prefix = kwargs['prefix'] - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs['next_marker'] - self.directory_id = kwargs.get('directory_id', None) - - -class ListHandlesResponse(msrest.serialization.Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = kwargs.get('handle_list', None) - self.next_marker = kwargs['next_marker'] - - -class ListSharesResponse(msrest.serialization.Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItemInternal] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.share_items = kwargs.get('share_items', None) - self.next_marker = kwargs['next_marker'] - - -class Metrics(msrest.serialization.Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: The retention policy. - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs['version'] - self.enabled = kwargs['enabled'] - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class RetentionPolicy(msrest.serialization.Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the File service. - If false, metrics data is retained, and the user is responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be retained. All data older - than this value will be deleted. Metrics data is deleted on a best-effort basis after the - retention period expires. - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.days = kwargs.get('days', None) - - -class ShareFileRangeList(msrest.serialization.Model): - """The list of file ranges. 
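To show how the Metrics and RetentionPolicy models above compose into StorageServiceProperties (defined later in this file), a minimal sketch with illustrative values; the analytics version string and the 7-day retention are assumptions, and the import targets the pre-1.0.0 package::

    from azure.multiapi.storagev2.fileshare.v2020_10_02._generated.models import (
        Metrics,
        RetentionPolicy,
        StorageServiceProperties,
    )

    # Hourly metrics kept for 7 days; minute-level metrics switched off.
    props = StorageServiceProperties(
        hour_metrics=Metrics(
            version="1.0",
            enabled=True,
            include_apis=True,
            retention_policy=RetentionPolicy(enabled=True, days=7),
        ),
        minute_metrics=Metrics(version="1.0", enabled=False),
    )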
- - :param ranges: - :type ranges: list[~azure.storage.fileshare.models.FileRange] - :param clear_ranges: - :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] - """ - - _attribute_map = { - 'ranges': {'key': 'Ranges', 'type': '[FileRange]'}, - 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'}, - } - - def __init__( - self, - **kwargs - ): - super(ShareFileRangeList, self).__init__(**kwargs) - self.ranges = kwargs.get('ranges', None) - self.clear_ranges = kwargs.get('clear_ranges', None) - - -class ShareItemInternal(msrest.serialization.Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a share. - :type properties: ~azure.storage.fileshare.models.SharePropertiesInternal - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__( - self, - **kwargs - ): - super(ShareItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.snapshot = kwargs.get('snapshot', None) - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - - -class SharePermission(msrest.serialization.Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor Definition Language - (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SharePermission, self).__init__(**kwargs) - self.permission = kwargs['permission'] - - -class SharePropertiesInternal(msrest.serialization.Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param quota: Required. - :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_m_bps: - :type provisioned_ingress_m_bps: int - :param provisioned_egress_m_bps: - :type provisioned_egress_m_bps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: ~datetime.datetime - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: - :type access_tier: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param access_tier_transition_state: - :type access_tier_transition_state: str - :param lease_status: The current lease status of the share. 
Possible values include: "locked", - "unlocked". - :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType - :param lease_state: Lease state of the share. Possible values include: "available", "leased", - "expired", "breaking", "broken". - :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType - :param lease_duration: When a share is leased, specifies whether the lease is of infinite or - fixed duration. Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType - :param enabled_protocols: - :type enabled_protocols: str - :param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash". - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'quota': {'key': 'Quota', 'type': 'int'}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'}, - 'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'}, - 'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'}, - 'root_squash': {'key': 'RootSquash', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SharePropertiesInternal, self).__init__(**kwargs) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.quota = kwargs['quota'] - self.provisioned_iops = kwargs.get('provisioned_iops', None) - self.provisioned_ingress_m_bps = kwargs.get('provisioned_ingress_m_bps', None) - self.provisioned_egress_m_bps = kwargs.get('provisioned_egress_m_bps', None) - self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.access_tier_transition_state = kwargs.get('access_tier_transition_state', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.enabled_protocols = kwargs.get('enabled_protocols', None) - self.root_squash = kwargs.get('root_squash', None) - - -class ShareProtocolSettings(msrest.serialization.Model): - """Protocol settings. - - :param smb: Settings for SMB protocol. 
- :type smb: ~azure.storage.fileshare.models.ShareSmbSettings - """ - - _attribute_map = { - 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings'}, - } - _xml_map = { - 'name': 'ProtocolSettings' - } - - def __init__( - self, - **kwargs - ): - super(ShareProtocolSettings, self).__init__(**kwargs) - self.smb = kwargs.get('smb', None) - - -class ShareSmbSettings(msrest.serialization.Model): - """Settings for SMB protocol. - - :param multichannel: Settings for SMB Multichannel. - :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel - """ - - _attribute_map = { - 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'}, - } - _xml_map = { - 'name': 'SMB' - } - - def __init__( - self, - **kwargs - ): - super(ShareSmbSettings, self).__init__(**kwargs) - self.multichannel = kwargs.get('multichannel', None) - - -class ShareStats(msrest.serialization.Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that - this value may not include all recently created or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = kwargs['share_usage_bytes'] - - -class SignedIdentifier(msrest.serialization.Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs['id'] - self.access_policy = kwargs.get('access_policy', None) - - -class SmbMultichannel(msrest.serialization.Model): - """Settings for SMB multichannel. - - :param enabled: If SMB multichannel is enabled. - :type enabled: bool - """ - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Multichannel' - } - - def __init__( - self, - **kwargs - ): - super(SmbMultichannel, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching - crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a - matching crc64 checksum. 
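A minimal sketch of how ShareProtocolSettings, ShareSmbSettings and SmbMultichannel above nest to enable SMB Multichannel in the service properties; the import assumes a pre-1.0.0 release::

    from azure.multiapi.storagev2.fileshare.v2020_10_02._generated.models import (
        ShareProtocolSettings,
        ShareSmbSettings,
        SmbMultichannel,
    )

    # Protocol settings enabling SMB Multichannel; assign to StorageServiceProperties.protocol.
    protocol_settings = ShareProtocolSettings(
        smb=ShareSmbSettings(multichannel=SmbMultichannel(enabled=True)),
    )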
- :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'}, - 'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None) - self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for - files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in minute aggregates for - files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.fileshare.models.CorsRule] - :param protocol: Protocol settings. - :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.protocol = kwargs.get('protocol', None) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models_py3.py deleted file mode 100644 index c95e0af..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models_py3.py +++ /dev/null @@ -1,1264 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Dict, List, Optional, Union - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - -from ._azure_file_storage_enums import * - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. 
- :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - *, - start: Optional[str] = None, - expiry: Optional[str] = None, - permission: Optional[str] = None, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class CopyFileSmbInfo(msrest.serialization.Model): - """Parameter group. - - :param file_permission_copy_mode: Specifies the option to copy file security descriptor from - source file or to set it using the value which is defined by the header value of - x-ms-file-permission or x-ms-file-permission-key. Possible values include: "source", - "override". - :type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file if it already exists - and has read-only attribute set. - :type ignore_read_only: bool - :param file_attributes: Specifies either the option to copy file attributes from a source - file(source) to a target file or a list of attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file creation time from a source - file(source) to a target file or a time value in ISO 8601 format to set as creation time on a - target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last write time from a - source file(source) to a target file or a time value in ISO 8601 format to set as last write - time on a target file. - :type file_last_write_time: str - :param set_archive_attribute: Specifies the option to set archive attribute on a target file. - True means archive attribute will be set on a target file despite attribute overrides or a - source file state. 
- :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'}, - 'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'}, - 'file_attributes': {'key': 'fileAttributes', 'type': 'str'}, - 'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'}, - 'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'}, - 'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'}, - } - - def __init__( - self, - *, - file_permission_copy_mode: Optional[Union[str, "PermissionCopyModeType"]] = None, - ignore_read_only: Optional[bool] = None, - file_attributes: Optional[str] = None, - file_creation_time: Optional[str] = None, - file_last_write_time: Optional[str] = None, - set_archive_attribute: Optional[bool] = None, - **kwargs - ): - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_permission_copy_mode = file_permission_copy_mode - self.ignore_read_only = ignore_read_only - self.file_attributes = file_attributes - self.file_creation_time = file_creation_time - self.file_last_write_time = file_last_write_time - self.set_archive_attribute = set_archive_attribute - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user age sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a browser should cache the - preflight OPTIONS request. 
- :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - *, - allowed_origins: str, - allowed_methods: str, - allowed_headers: str, - exposed_headers: str, - max_age_in_seconds: int, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class DirectoryItem(msrest.serialization.Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param file_id: - :type file_id: str - :param properties: File properties. - :type properties: ~azure.storage.fileshare.models.FileProperty - :param attributes: - :type attributes: str - :param permission_key: - :type permission_key: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - 'attributes': {'key': 'Attributes', 'type': 'str'}, - 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__( - self, - *, - name: str, - file_id: Optional[str] = None, - properties: Optional["FileProperty"] = None, - attributes: Optional[str] = None, - permission_key: Optional[str] = None, - **kwargs - ): - super(DirectoryItem, self).__init__(**kwargs) - self.name = name - self.file_id = file_id - self.properties = properties - self.attributes = attributes - self.permission_key = permission_key - - -class FileHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param file_content_type: Sets the MIME content type of the file. The default type is - 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service stores this value - but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition header. 
- :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': 'fileContentType', 'type': 'str'}, - 'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'}, - 'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'}, - 'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'}, - 'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'}, - 'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - file_content_type: Optional[str] = None, - file_content_encoding: Optional[str] = None, - file_content_language: Optional[str] = None, - file_cache_control: Optional[str] = None, - file_content_md5: Optional[bytearray] = None, - file_content_disposition: Optional[str] = None, - **kwargs - ): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = file_content_type - self.file_content_encoding = file_content_encoding - self.file_content_language = file_content_language - self.file_cache_control = file_cache_control - self.file_content_md5 = file_content_md5 - self.file_content_disposition = file_content_disposition - - -class FileItem(msrest.serialization.Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param file_id: - :type file_id: str - :param properties: Required. File properties. - :type properties: ~azure.storage.fileshare.models.FileProperty - :param attributes: - :type attributes: str - :param permission_key: - :type permission_key: str - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - 'attributes': {'key': 'Attributes', 'type': 'str'}, - 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, - } - _xml_map = { - 'name': 'File' - } - - def __init__( - self, - *, - name: str, - properties: "FileProperty", - file_id: Optional[str] = None, - attributes: Optional[str] = None, - permission_key: Optional[str] = None, - **kwargs - ): - super(FileItem, self).__init__(**kwargs) - self.name = name - self.file_id = file_id - self.properties = properties - self.attributes = attributes - self.permission_key = permission_key - - -class FileProperty(msrest.serialization.Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value may not be up-to-date - since an SMB client may have modified the file locally. The value of Content-Length may not - reflect that fact until the handle is closed or the op-lock is broken. To retrieve current - property values, call Get File Properties. 
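In these _models_py3 variants the required fields become keyword-only constructor arguments instead of the untyped kwargs.get lookups used in _models.py; a short hedged sketch with made-up values, assuming the usual autorest pattern where the models package re-exports the _models_py3 classes on Python 3 and that a pre-1.0.0 release is installed::

    import datetime

    from azure.multiapi.storagev2.fileshare.v2020_10_02._generated.models import (
        FileItem,
        FileProperty,
    )

    item = FileItem(
        name="report.json",
        properties=FileProperty(
            content_length=1024,
            last_modified=datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc),
        ),
    )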
- :type content_length: long - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_access_time: - :type last_access_time: ~datetime.datetime - :param last_write_time: - :type last_write_time: ~datetime.datetime - :param change_time: - :type change_time: ~datetime.datetime - :param last_modified: - :type last_modified: ~datetime.datetime - :param etag: - :type etag: str - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'creation_time': {'key': 'CreationTime', 'type': 'iso-8601'}, - 'last_access_time': {'key': 'LastAccessTime', 'type': 'iso-8601'}, - 'last_write_time': {'key': 'LastWriteTime', 'type': 'iso-8601'}, - 'change_time': {'key': 'ChangeTime', 'type': 'iso-8601'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - } - - def __init__( - self, - *, - content_length: int, - creation_time: Optional[datetime.datetime] = None, - last_access_time: Optional[datetime.datetime] = None, - last_write_time: Optional[datetime.datetime] = None, - change_time: Optional[datetime.datetime] = None, - last_modified: Optional[datetime.datetime] = None, - etag: Optional[str] = None, - **kwargs - ): - super(FileProperty, self).__init__(**kwargs) - self.content_length = content_length - self.creation_time = creation_time - self.last_access_time = last_access_time - self.last_write_time = last_write_time - self.change_time = change_time - self.last_modified = last_modified - self.etag = etag - - -class FileRange(msrest.serialization.Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long'}, - 'end': {'key': 'End', 'type': 'long'}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(FileRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class FilesAndDirectoriesListSegment(msrest.serialization.Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. - :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]'}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__( - self, - *, - directory_items: List["DirectoryItem"], - file_items: List["FileItem"], - **kwargs - ): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = directory_items - self.file_items = file_items - - -class HandleItem(msrest.serialization.Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID. - :type handle_id: str - :param path: Required. 
File or directory name including full path starting from share root. - :type path: str - :param file_id: Required. FileId uniquely identifies the file or directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file handle was opened. - :type session_id: str - :param client_ip: Required. Client IP that opened the handle. - :type client_ip: str - :param open_time: Required. Time when the session that previously opened the handle has last - been reconnected. (UTC). - :type open_time: ~datetime.datetime - :param last_reconnect_time: Time handle was last connected to (UTC). - :type last_reconnect_time: ~datetime.datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str'}, - 'path': {'key': 'Path', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'parent_id': {'key': 'ParentId', 'type': 'str'}, - 'session_id': {'key': 'SessionId', 'type': 'str'}, - 'client_ip': {'key': 'ClientIp', 'type': 'str'}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__( - self, - *, - handle_id: str, - path: str, - file_id: str, - session_id: str, - client_ip: str, - open_time: datetime.datetime, - parent_id: Optional[str] = None, - last_reconnect_time: Optional[datetime.datetime] = None, - **kwargs - ): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = handle_id - self.path = path - self.file_id = file_id - self.parent_id = parent_id - self.session_id = session_id - self.client_ip = client_ip - self.open_time = open_time - self.last_reconnect_time = last_reconnect_time - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. Abstract for entries that can be listed from Directory. - :type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. 
- :type next_marker: str - :param directory_id: - :type directory_id: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - 'directory_id': {'key': 'DirectoryId', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - share_name: str, - directory_path: str, - prefix: str, - segment: "FilesAndDirectoriesListSegment", - next_marker: str, - share_snapshot: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - directory_id: Optional[str] = None, - **kwargs - ): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.share_name = share_name - self.share_snapshot = share_snapshot - self.directory_path = directory_path - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - self.directory_id = directory_id - - -class ListHandlesResponse(msrest.serialization.Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - next_marker: str, - handle_list: Optional[List["HandleItem"]] = None, - **kwargs - ): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = handle_list - self.next_marker = next_marker - - -class ListSharesResponse(msrest.serialization.Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItemInternal] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - next_marker: str, - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - share_items: Optional[List["ShareItemInternal"]] = None, - **kwargs - ): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.share_items = share_items - self.next_marker = next_marker - - -class Metrics(msrest.serialization.Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: The retention policy. - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - version: str, - enabled: bool, - include_apis: Optional[bool] = None, - retention_policy: Optional["RetentionPolicy"] = None, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class RetentionPolicy(msrest.serialization.Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the File service. - If false, metrics data is retained, and the user is responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be retained. All data older - than this value will be deleted. Metrics data is deleted on a best-effort basis after the - retention period expires. 
- :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - } - - def __init__( - self, - *, - enabled: bool, - days: Optional[int] = None, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class ShareFileRangeList(msrest.serialization.Model): - """The list of file ranges. - - :param ranges: - :type ranges: list[~azure.storage.fileshare.models.FileRange] - :param clear_ranges: - :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] - """ - - _attribute_map = { - 'ranges': {'key': 'Ranges', 'type': '[FileRange]'}, - 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'}, - } - - def __init__( - self, - *, - ranges: Optional[List["FileRange"]] = None, - clear_ranges: Optional[List["ClearRange"]] = None, - **kwargs - ): - super(ShareFileRangeList, self).__init__(**kwargs) - self.ranges = ranges - self.clear_ranges = clear_ranges - - -class ShareItemInternal(msrest.serialization.Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a share. - :type properties: ~azure.storage.fileshare.models.SharePropertiesInternal - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__( - self, - *, - name: str, - properties: "SharePropertiesInternal", - snapshot: Optional[str] = None, - deleted: Optional[bool] = None, - version: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(ShareItemInternal, self).__init__(**kwargs) - self.name = name - self.snapshot = snapshot - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class SharePermission(msrest.serialization.Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor Definition Language - (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__( - self, - *, - permission: str, - **kwargs - ): - super(SharePermission, self).__init__(**kwargs) - self.permission = permission - - -class SharePropertiesInternal(msrest.serialization.Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param quota: Required. 
- :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_m_bps: - :type provisioned_ingress_m_bps: int - :param provisioned_egress_m_bps: - :type provisioned_egress_m_bps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: ~datetime.datetime - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: - :type access_tier: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param access_tier_transition_state: - :type access_tier_transition_state: str - :param lease_status: The current lease status of the share. Possible values include: "locked", - "unlocked". - :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType - :param lease_state: Lease state of the share. Possible values include: "available", "leased", - "expired", "breaking", "broken". - :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType - :param lease_duration: When a share is leased, specifies whether the lease is of infinite or - fixed duration. Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType - :param enabled_protocols: - :type enabled_protocols: str - :param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash". - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'quota': {'key': 'Quota', 'type': 'int'}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'}, - 'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'}, - 'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'}, - 'root_squash': {'key': 'RootSquash', 'type': 'str'}, - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - quota: int, - provisioned_iops: Optional[int] = None, - provisioned_ingress_m_bps: Optional[int] = None, - provisioned_egress_m_bps: Optional[int] = None, - next_allowed_quota_downgrade_time: Optional[datetime.datetime] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier: Optional[str] = None, - access_tier_change_time: Optional[datetime.datetime] = None, - access_tier_transition_state: Optional[str] = None, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: 
Optional[Union[str, "LeaseDurationType"]] = None, - enabled_protocols: Optional[str] = None, - root_squash: Optional[Union[str, "ShareRootSquash"]] = None, - **kwargs - ): - super(SharePropertiesInternal, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.quota = quota - self.provisioned_iops = provisioned_iops - self.provisioned_ingress_m_bps = provisioned_ingress_m_bps - self.provisioned_egress_m_bps = provisioned_egress_m_bps - self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_change_time = access_tier_change_time - self.access_tier_transition_state = access_tier_transition_state - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.enabled_protocols = enabled_protocols - self.root_squash = root_squash - - -class ShareProtocolSettings(msrest.serialization.Model): - """Protocol settings. - - :param smb: Settings for SMB protocol. - :type smb: ~azure.storage.fileshare.models.ShareSmbSettings - """ - - _attribute_map = { - 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings'}, - } - _xml_map = { - 'name': 'ProtocolSettings' - } - - def __init__( - self, - *, - smb: Optional["ShareSmbSettings"] = None, - **kwargs - ): - super(ShareProtocolSettings, self).__init__(**kwargs) - self.smb = smb - - -class ShareSmbSettings(msrest.serialization.Model): - """Settings for SMB protocol. - - :param multichannel: Settings for SMB Multichannel. - :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel - """ - - _attribute_map = { - 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'}, - } - _xml_map = { - 'name': 'SMB' - } - - def __init__( - self, - *, - multichannel: Optional["SmbMultichannel"] = None, - **kwargs - ): - super(ShareSmbSettings, self).__init__(**kwargs) - self.multichannel = multichannel - - -class ShareStats(msrest.serialization.Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that - this value may not include all recently created or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'}, - } - - def __init__( - self, - *, - share_usage_bytes: int, - **kwargs - ): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = share_usage_bytes - - -class SignedIdentifier(msrest.serialization.Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - - def __init__( - self, - *, - id: str, - access_policy: Optional["AccessPolicy"] = None, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SmbMultichannel(msrest.serialization.Model): - """Settings for SMB multichannel. 
- - :param enabled: If SMB multichannel is enabled. - :type enabled: bool - """ - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Multichannel' - } - - def __init__( - self, - *, - enabled: Optional[bool] = None, - **kwargs - ): - super(SmbMultichannel, self).__init__(**kwargs) - self.enabled = enabled - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching - crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a - matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'}, - 'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'}, - } - - def __init__( - self, - *, - source_if_match_crc64: Optional[bytearray] = None, - source_if_none_match_crc64: Optional[bytearray] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = source_if_match_crc64 - self.source_if_none_match_crc64 = source_if_none_match_crc64 - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - message: Optional[str] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for - files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in minute aggregates for - files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.fileshare.models.CorsRule] - :param protocol: Protocol settings. - :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings'}, - } - - def __init__( - self, - *, - hour_metrics: Optional["Metrics"] = None, - minute_metrics: Optional["Metrics"] = None, - cors: Optional[List["CorsRule"]] = None, - protocol: Optional["ShareProtocolSettings"] = None, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.protocol = protocol diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/__init__.py deleted file mode 100644 index ba8fb22..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_directory_operations.py deleted file mode 100644 index 8b241b5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,762 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a new directory under the specified share or parent directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. 
- :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all system properties for the specified directory, and can also be used to check the - existence of a directory. The data returned does not include the files in the directory or any - subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}'} # 
type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Removes the specified empty directory. Note that the directory must be empty before it can be - deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def set_properties( - self, - timeout=None, # type: Optional[int] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties on the directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. 
Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. 
- :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def list_files_and_directories_segment( - self, - prefix=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListFilesIncludeType"]]] - include_extended_info=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListFilesAndDirectoriesSegmentResponse" - """Returns a list of files or directories under the specified share or directory. It lists the - contents only for a single level of the directory hierarchy. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. 
- :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType] - :param include_extended_info: - :type include_extended_info: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListFilesAndDirectoriesSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListFilesAndDirectoriesSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if include_extended_info is not None: - header_parameters['x-ms-file-extended-info'] = self._serialize.header("include_extended_info", include_extended_info, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if 
response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def list_handles( - self, - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - recursive=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListHandlesResponse" - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. 
- :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def force_close_handles( - self, - handle_id, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - recursive=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> None - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. 
- :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. - :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - 
response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_file_operations.py deleted file mode 100644 index 577c303..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_file_operations.py +++ /dev/null @@ -1,1797 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class FileOperations(object): - """FileOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - file_content_length, # type: int - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - file_http_headers=None, # type: Optional["_models.FileHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a new file or replaces a file. Note it only initializes the file with no content. - - :param file_content_length: Specifies the maximum size for the file, up to 4 TB. - :type file_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - file_type_constant = "file" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("file_type_constant", file_type_constant, 'str') - if 
_file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - 
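As a hedged illustration, this create operation is normally reached through ShareFileClient.create_file in the upstream azure-storage-file-share package; the sketch below assumes that package is installed and uses placeholder names:

    from azure.storage.fileshare import ShareFileClient

    file_client = ShareFileClient.from_connection_string(
        conn_str="<connection-string>", share_name="myshare", file_path="mydir/hello.txt")
    # Allocates a 1 KiB file with no content (x-ms-type: file, x-ms-content-length: 1024);
    # bytes are written afterwards with upload_range / upload_file.
    file_client.create_file(size=1024)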
response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def download( - self, - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - range_get_content_md5=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """Reads or downloads a file from the system, including its metadata and properties. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and specified together with the - Range header, the service returns the MD5 hash for the range, as long as the range is less than - or equal to 4 MB in size. - :type range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - 
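A brief, assumed usage sketch for the ranged-download case handled here (a partial read returns 206 together with the Content-Range and x-ms-* headers listed above); it relies on ShareFileClient.download_file from the upstream azure-storage-file-share package and placeholder names:

    from azure.storage.fileshare import ShareFileClient

    file_client = ShareFileClient.from_connection_string(
        conn_str="<connection-string>", share_name="myshare", file_path="mydir/hello.txt")
    # Requesting only the first 512 bytes exercises the 206 (Partial Content) branch.
    stream = file_client.download_file(offset=0, length=512)
    data = stream.readall()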
response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all user-defined metadata, standard HTTP properties, and system properties for the - file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-type']=self._deserialize('str', response.headers.get('x-ms-type')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - 
**kwargs # type: Any - ): - # type: (...) -> None - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def set_http_headers( - self, - timeout=None, # type: Optional[int] - file_content_length=None, # type: Optional[int] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - file_http_headers=None, # type: Optional["_models.FileHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets HTTP headers on the file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :param file_content_length: Resizes a file to the specified size. If the specified byte value - is less than the current size of the file, then all ranges above the specified byte value are - cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if 
file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', 
response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
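For context, a small sketch (assuming the upstream azure-storage-file-share clients and placeholder values) of how the comp=properties and comp=metadata requests built by set_http_headers and set_metadata are typically issued:

    from azure.storage.fileshare import ShareFileClient, ContentSettings

    file_client = ShareFileClient.from_connection_string(
        conn_str="<connection-string>", share_name="myshare", file_path="mydir/hello.txt")
    # comp=properties: set standard HTTP headers on the file.
    file_client.set_http_headers(content_settings=ContentSettings(content_type="text/plain"))
    # comp=metadata: replace the user-defined x-ms-meta-* values.
    file_client.set_file_metadata(metadata={"category": "logs"})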
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) 
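As an assumed end-to-end sketch of the lease actions defined in this operation group (acquire, change, release), using the upstream azure-storage-file-share ShareLeaseClient wrapper and placeholder names:

    import uuid

    from azure.storage.fileshare import ShareFileClient

    file_client = ShareFileClient.from_connection_string(
        conn_str="<connection-string>", share_name="myshare", file_path="mydir/hello.txt")
    lease = file_client.acquire_lease()                    # x-ms-lease-action: acquire
    lease.change(proposed_lease_id=str(uuid.uuid4()))      # x-ms-lease-action: change
    lease.release()                                        # x-ms-lease-action: release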
- - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def upload_range( - self, - range, # type: str - content_length, # type: int - timeout=None, # type: Optional[int] - file_range_write="update", # type: Union[str, "_models.FileRangeWriteType"] - content_md5=None, # type: Optional[bytearray] - optionalbody=None, # type: Optional[IO] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the start and end of the range - must be specified. For an update operation, the range can be up to 4 MB in size. For a clear - operation, the range can be up to the value of the file's full size. The File service accepts - only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be - specified in the following format: bytes=startByte-endByte. - :type range: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_range_write: Specify one of the following options: - Update: Writes the bytes - specified by the request body into the specified range. The Range and Content-Length headers - must match to perform the update. - Clear: Clears the specified range and releases the space - used in storage for that range. To clear a range, set the Content-Length header to zero, and - set the Range header to a value that indicates the range to clear, up to maximum file size. - :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType - :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of - the data during transport. When the Content-MD5 header is specified, the File service compares - the hash of the content that has arrived with the header value that was sent. If the two hashes - do not match, the operation will fail with error code 400 (Bad Request). - :type content_md5: bytearray - :param optionalbody: Initial data. - :type optionalbody: IO - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "range" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_range.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = optionalbody - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, 
response_headers) - - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def upload_range_from_url( - self, - range, # type: str - copy_source, # type: str - content_length, # type: int - timeout=None, # type: Optional[int] - source_range=None, # type: Optional[str] - source_content_crc64=None, # type: Optional[bytearray] - copy_source_authorization=None, # type: Optional[str] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Upload a range of bytes to a file where the contents are read from a URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_content_crc64: bytearray - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_match_crc64 = None - _source_if_none_match_crc64 = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - comp = "range" - accept = "application/xml" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - if _source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", _source_if_match_crc64, 'bytearray') - if _source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", _source_if_none_match_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def get_range_list( - self, - sharesnapshot=None, # type: Optional[str] - prevsharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ShareFileRangeList" - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, - when present, specifies the previous snapshot. - :type prevsharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, inclusively. - :type range: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareFileRangeList, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareFileRangeList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareFileRangeList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "rangelist" - accept = "application/xml" - - # Construct URL - url = self.get_range_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if prevsharesnapshot is not None: - query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-content-length']=self._deserialize('long', response.headers.get('x-ms-content-length')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareFileRangeList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def start_copy( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - 
file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - copy_file_smb_info=None, # type: Optional["_models.CopyFileSmbInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param copy_file_smb_info: Parameter group. - :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_permission_copy_mode = None - _ignore_read_only = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - _set_archive_attribute = None - _lease_id = None - if copy_file_smb_info is not None: - _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - _ignore_read_only = copy_file_smb_info.ignore_read_only - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - _set_archive_attribute = copy_file_smb_info.set_archive_attribute - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.start_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if _file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", _file_permission_copy_mode, 'str') - if _ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", _ignore_read_only, 'bool') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if _set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", _set_archive_attribute, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def abort_copy( - self, - copy_id, # type: str - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Aborts a pending Copy File operation, and leaves a destination file with zero length and full - metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def list_handles( - self, - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListHandlesResponse" - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def force_close_handles( - self, - handle_id, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - **kwargs # type: Any 
- ): - # type: (...) -> None - """Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', 
response.headers.get('x-ms-number-of-handles-closed')) - response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_service_operations.py deleted file mode 100644 index aaf67d3..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_service_operations.py +++ /dev/null @@ -1,276 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def set_properties( - self, - storage_service_properties, # type: "_models.StorageServiceProperties" - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for a storage account's File service endpoint, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceProperties" - """Gets the properties of a storage account's File service, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - def list_shares_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListSharesIncludeType"]]] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListSharesResponse" - """The List Shares Segment operation returns a list of the shares and share snapshots under the - specified account. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListSharesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListSharesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_shares_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListSharesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_shares_segment.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_share_operations.py deleted file mode 100644 index 02a94bf..0000000 --- 
a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_share_operations.py +++ /dev/null @@ -1,1506 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ShareOperations(object): - """ShareOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - quota=None, # type: Optional[int] - access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]] - enabled_protocols=None, # type: Optional[str] - root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a new share under the specified account. If the share with the same name already - exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param enabled_protocols: Protocols to enable on the share. - :type enabled_protocols: str - :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
- :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if enabled_protocols is not None: - header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all user-defined metadata and system properties for the specified share or share - snapshot. The data returned does not include the share's list of files. 
- - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota')) - response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')) - response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')) - 
response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')) - response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')) - response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols')) - response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}'} # type: ignore - - def delete( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Operation marks the specified share or share snapshot for deletion. The share or share snapshot - and any files contained within it are later deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param delete_snapshots: Specifies the option 'include' to delete the base share and all of its - snapshots. - :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param lease_access_conditions: Parameter group.
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. 
A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - 
**kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "renew" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return 
cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def create_snapshot( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{shareName}'} # type: ignore - - def create_permission( - self, - share_permission, # type: "_models.SharePermission" - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Create a permission (a security descriptor). 
- - :param share_permission: A permission (a security descriptor) at the share level. - :type share_permission: ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/xml" - - # Construct URL - url = self.create_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(share_permission, 'SharePermission') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_permission.metadata = {'url': '/{shareName}'} # type: ignore - - def get_permission( - self, - file_permission_key, # type: str - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.SharePermission" - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SharePermission, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - accept = "application/json" - - # Construct URL - url = self.get_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('SharePermission', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} # type: ignore - - def set_properties( - self, - timeout=None, # type: Optional[int] - quota=None, # type: Optional[int] - access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]] - root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. 
- :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param root_squash: Root squash to set on the share. Only valid for NFS shares. - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: 
Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}'} # type: ignore - - def get_access_policy( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # 
type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> List["_models.SignedIdentifier"] - """Returns information about stored access policies specified on the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - def set_access_policy( - self, - timeout=None, # type: Optional[int] - share_acl=None, # type: 
Optional[List["_models.SignedIdentifier"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets a stored access policy for use with shared access signatures. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param share_acl: The ACL for the share. - :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - def get_statistics( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ShareStats" - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareStats, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} # type: ignore - - def restore( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - deleted_share_name=None, # type: Optional[str] - deleted_share_version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_share_name: Specifies the name of the previously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the previously-deleted share. - :type deleted_share_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{shareName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_lease.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_lease.py deleted file mode 100644 index 7c38145..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_lease.py +++ /dev/null @@ -1,237 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.exceptions import HttpResponseError - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated.operations import FileOperations, ShareOperations - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - ShareClient = TypeVar("ShareClient") - - -class ShareLeaseClient(object): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareClient or ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file or share to lease. - :type client: ~azure.storage.fileshare.ShareFileClient or - ~azure.storage.fileshare.ShareClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. 
- """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[ShareFileClient, ShareClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'file_name'): - self._client = client._client.file # type: ignore # pylint: disable=protected-access - self._snapshot = None - elif hasattr(client, 'share_name'): - self._client = client._client.share - self._snapshot = client.snapshot - else: - raise TypeError("Lease must use ShareFileClient or ShareClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, **kwargs): - # type: (**Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file or share for write and delete operations. If the file or share does not have an active lease, - the File or Share service creates a lease on the file or share. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file or share does not have an active lease, the File or Share service creates a - lease on the file and returns a new lease ID. - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be - between 15 and 60 seconds. A share lease duration cannot be changed - using renew or change. Default is -1 (infinite share lease). - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - lease_duration = kwargs.pop('lease_duration', -1) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the share lease. - - The share lease can be renewed if the lease ID specified in the - lease client matches that associated with the share. Note that - the lease may be renewed even if it has expired as long as the share - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - if isinstance(self._client, FileOperations): - raise TypeError("Lease renewal operations are only valid for ShareClient.") - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - sharesnapshot=self._snapshot, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. 
The lease may be released if the lease ID specified on the request matches - that associated with the share or file. Releasing the lease allows another client to immediately acquire - the lease for the share or file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File or Share service will raise an error - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, **kwargs): - # type: (Any) -> int - """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int lease_break_period: - This is the proposed duration of seconds that the share lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the share lease. If longer, the time remaining on the share lease is used. - A new share lease will not be available before the break period has - expired, but the share lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration share lease breaks after the remaining share lease - period elapses, and an infinite share lease breaks immediately. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
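A minimal sketch of the lease lifecycle these methods implement, driving a ShareLeaseClient directly against a share. The import path (a retained fileshare API version), the account URL and the credential are placeholders and assumptions, not values taken from this diff::

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareClient, ShareLeaseClient

    share = ShareClient(
        account_url="https://myaccount.file.core.windows.net",   # placeholder
        share_name="myshare",                                     # placeholder
        credential="<account-key>",                               # placeholder
    )

    lease = ShareLeaseClient(share)        # lease ID is auto-generated when omitted
    lease.acquire(lease_duration=15)       # finite share lease: 15-60 seconds
    lease.renew()                          # share leases only; resets the duration clock
    remaining = lease.break_lease(lease_break_period=5)
    print("seconds until the lease can be re-acquired:", remaining)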
- :rtype: int - """ - try: - lease_break_period = kwargs.pop('lease_break_period', None) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - if isinstance(self._client, ShareOperations): - kwargs['break_period'] = lease_break_period - if isinstance(self._client, FileOperations) and lease_break_period: - raise TypeError("Setting a lease break period is only applicable to Share leases.") - - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_models.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_models.py deleted file mode 100644 index 0f7a2fa..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_models.py +++ /dev/null @@ -1,1011 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._parser import _parse_datetime_from_str -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import ShareProtocolSettings as GeneratedShareProtocolSettings -from ._generated.models import ShareSmbSettings as GeneratedShareSmbSettings -from ._generated.models import SmbMultichannel as GeneratedSmbMultichannel -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import DirectoryItem - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for files. - - All required parameters must be populated in order to send to Azure. - - :keyword str version: The version of Storage Analytics to configure. - :keyword bool enabled: Required. Indicates whether metrics are enabled for the - File service. - :keyword bool include_ap_is: Indicates whether metrics should generate summary - statistics for called API operations. - :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should - persist. 
- """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param bool enabled: Required. Indicates whether a retention policy is enabled - for the storage service. - :param int days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. - """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. 
- """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ShareSmbSettings(GeneratedShareSmbSettings): - """ Settings for the SMB protocol. - - :keyword SmbMultichannel multichannel: Sets the multichannel settings. - """ - def __init__(self, **kwargs): - self.multichannel = kwargs.get('multichannel') - if self.multichannel is None: - raise ValueError("The value 'multichannel' must be specified.") - - -class SmbMultichannel(GeneratedSmbMultichannel): - """ Settings for Multichannel. - - :keyword bool enabled: If SMB Multichannel is enabled. - """ - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled') - if self.enabled is None: - raise ValueError("The value 'enabled' must be specified.") - - -class ShareProtocolSettings(GeneratedShareProtocolSettings): - """Protocol Settings class used by the set and get service properties methods in the share service. - - Contains protocol properties of the share service such as the SMB setting of the share service. - - :keyword SmbSettings smb: Sets SMB settings. - """ - def __init__(self, **kwargs): - self.smb = kwargs.get('smb') - if self.smb is None: - raise ValueError("The value 'smb' must be specified.") - - @classmethod - def _from_generated(cls, generated): - return cls( - smb=generated.smb) - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :type permission: str or ~azure.storage.fileshare.FileSasPermissions or - ~azure.storage.fileshare.ShareSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class LeaseProperties(DictMixin): - """File or Share Lease Properties. - - :ivar str status: - The lease status of the file or share. Possible values: locked|unlocked - :ivar str state: - Lease state of the file or share. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file or share is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """Used to store the content settings of a file. - - :param str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :param str content_language: - If the content_language has previously been set - for the file, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :param bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. 
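ContentSettings is typically supplied at upload time so the service stores these HTTP headers with the file. A hedged sketch, assuming the ShareFileClient from a retained API version and placeholder account details::

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareFileClient, ContentSettings

    file_client = ShareFileClient(
        account_url="https://myaccount.file.core.windows.net",   # placeholder
        share_name="myshare",
        file_path="docs/report.html",
        credential="<account-key>",
    )

    settings = ContentSettings(content_type="text/html", cache_control="max-age=3600")
    file_client.upload_file(b"<html><body>hello</body></html>", content_settings=settings)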
- """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class ShareProperties(DictMixin): - """Share's properties class. - - :ivar str name: - The name of the share. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the share was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int quota: - The allocated quota. - :ivar str access_tier: - The share's access tier. - :ivar dict metadata: A dict with name_value pairs to associate with the - share as metadata. - :ivar str snapshot: - Snapshot of the share. - :ivar bool deleted: - To indicate if this share is deleted or not. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar datetime deleted: - To indicate the deleted time of the deleted share. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar str version: - To indicate the version of deleted share. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar int remaining_retention_days: - To indicate how many remaining days the deleted share will be kept. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar ~azure.storage.fileshare.models.ShareRootSquash or str root_squash: - Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :ivar list(str) protocols: - Indicates the protocols enabled on the share. The protocol can be either SMB or NFS. 
- """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.quota = kwargs.get('x-ms-share-quota') - self.access_tier = kwargs.get('x-ms-access-tier') - self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') - self.metadata = kwargs.get('metadata') - self.snapshot = None - self.deleted = None - self.deleted_time = None - self.version = None - self.remaining_retention_days = None - self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') - self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') - self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') - self.lease = LeaseProperties(**kwargs) - self.protocols = [protocol.strip() for protocol in kwargs.get('x-ms-enabled-protocols', None).split(',')]\ - if kwargs.get('x-ms-enabled-protocols', None) else None - self.root_squash = kwargs.get('x-ms-root-squash', None) - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.quota = generated.properties.quota - props.access_tier = generated.properties.access_tier - props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time - props.metadata = generated.metadata - props.snapshot = generated.snapshot - props.deleted = generated.deleted - props.deleted_time = generated.properties.deleted_time - props.version = generated.version - props.remaining_retention_days = generated.properties.remaining_retention_days - props.provisioned_egress_mbps = generated.properties.provisioned_egress_m_bps - props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_m_bps - props.provisioned_iops = generated.properties.provisioned_iops - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.protocols = [protocol.strip() for protocol in generated.properties.enabled_protocols.split(',')]\ - if generated.properties.enabled_protocols else None - props.root_squash = generated.properties.root_squash - - return props - - -class SharePropertiesPaged(PageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - prefix=self.prefix, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class Handle(DictMixin): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :keyword str handle_id: Required. XSMB service handle ID - :keyword str path: Required. File or directory name including full path starting - from share root - :keyword str file_id: Required. FileId uniquely identifies the file or - directory. - :keyword str parent_id: ParentId uniquely identifies the parent directory of the - object. - :keyword str session_id: Required. SMB session ID in context of which the file - handle was opened - :keyword str client_ip: Required. Client IP that opened the handle - :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('handle_id') - self.path = kwargs.get('path') - self.file_id = kwargs.get('file_id') - self.parent_id = kwargs.get('parent_id') - self.session_id = kwargs.get('session_id') - self.client_ip = kwargs.get('client_ip') - self.open_time = kwargs.get('open_time') - self.last_reconnect_time = kwargs.get('last_reconnect_time') - - @classmethod - def _from_generated(cls, generated): - handle = cls() - handle.id = generated.handle_id - handle.path = generated.path - handle.file_id = generated.file_id - handle.parent_id = generated.parent_id - handle.session_id = generated.session_id - handle.client_ip = generated.client_ip - handle.open_time = generated.open_time - handle.last_reconnect_time = generated.last_reconnect_time - return handle - - -class HandlesPaged(PageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryProperties(DictMixin): - """Directory's properties class. - - :ivar str name: - The name of the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool server_encrypted: - Whether encryption is enabled. - :keyword dict metadata: A dict with name_value pairs to associate with the - directory as metadata. - :ivar change_time: Change time for the file. - :vartype change_time: str or ~datetime.datetime - :ivar creation_time: Creation time for the file. - :vartype creation_time: str or ~datetime.datetime - :ivar last_write_time: Last write time for the file. - :vartype last_write_time: str or ~datetime.datetime - :ivar last_access_time: Last access time for the file. - :vartype last_access_time: ~datetime.datetime - :ivar file_attributes: - The file system attributes for files and directories. - :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :ivar permission_key: Key of the permission to be set for the - directory/file. - :vartype permission_key: str - :ivar file_id: Required. FileId uniquely identifies the file or - directory. - :vartype file_id: str - :ivar parent_id: ParentId uniquely identifies the parent directory of the - object. 
- :vartype parent_id: str - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.metadata = kwargs.get('metadata') - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.last_access_time = None - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - self.is_directory = True - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.file_id = generated.file_id - props.file_attributes = generated.attributes - props.last_modified = generated.properties.last_modified - props.creation_time = generated.properties.creation_time - props.last_access_time = generated.properties.last_access_time - props.last_write_time = generated.properties.last_write_time - props.change_time = generated.properties.change_time - props.etag = generated.properties.etag - props.permission_key = generated.permission_key - return props - - -class DirectoryPropertiesPaged(PageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
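DirectoryPropertiesPaged is what the directory listing iterates over, yielding the directory and file property models above. A hedged sketch, with an assumed import path and placeholder paths::

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareDirectoryClient

    directory = ShareDirectoryClient(
        account_url="https://myaccount.file.core.windows.net",   # placeholder
        share_name="myshare",
        directory_path="logs/2024",
        credential="<account-key>",
    )

    for item in directory.list_directories_and_files(name_starts_with="app-"):
        kind = "dir " if item.is_directory else "file"
        print(kind, item.name)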
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access - self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access - return self._response.next_marker or None, self.current_page - - -class FileProperties(DictMixin): - """File's properties class. - - :ivar str name: - The name of the file. - :ivar str path: - The path of the file. - :ivar str share: - The name of share. - :ivar str snapshot: - File snapshot. - :ivar int content_length: - Size of file in bytes. - :ivar dict metadata: A dict with name_value pairs to associate with the - file as metadata. - :ivar str file_type: - Type of the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - Size of file in bytes. - :ivar str content_range: - The range of bytes. - :ivar bool server_encrypted: - Whether encryption is enabled. - :ivar copy: - The copy properties. - :vartype copy: ~azure.storage.fileshare.CopyProperties - :ivar content_settings: - The content settings for the file. 
- :vartype content_settings: ~azure.storage.fileshare.ContentSettings - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.path = None - self.share = None - self.snapshot = None - self.content_length = kwargs.get('Content-Length') - self.metadata = kwargs.get('metadata') - self.file_type = kwargs.get('x-ms-type') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.last_access_time = None - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - self.is_directory = False - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.file_id = generated.file_id - props.etag = generated.properties.etag - props.file_attributes = generated.attributes - props.last_modified = generated.properties.last_modified - props.creation_time = generated.properties.creation_time - props.last_access_time = generated.properties.last_access_time - props.last_write_time = generated.properties.last_write_time - props.change_time = generated.properties.change_time - props.size = generated.properties.content_length - props.permission_key = generated.permission_key - return props - - -class ShareProtocols(str, Enum): - """Enabled protocols on the share""" - SMB = "SMB" - NFS = "NFS" - - -class CopyProperties(DictMixin): - """File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation. - :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. 
This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source file to a destination file. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar datetime destination_snapshot: - Included if the file is incremental copy or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this file. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with - generating shared access signature operations. - - :param bool read: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :param bool create: - Create a new file or copy a file to a new file. - :param bool write: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - :param bool delete: - Delete the file. - """ - def __init__(self, read=False, create=False, write=False, delete=False): - self.read = read - self.create = create - self.write = write - self.delete = delete - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - create permissions, you would provide a string "rc". 
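A tiny sketch of the permission-string round trip that from_string and __str__ implement; only the import path is an assumption, the behaviour follows directly from the code above::

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import FileSasPermissions

    perms = FileSasPermissions.from_string("rcw")
    assert perms.read and perms.create and perms.write and not perms.delete
    assert str(perms) == "rcw"

    # The object can also be built from explicit keyword flags.
    assert str(FileSasPermissions(read=True, delete=True)) == "rd"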
- - :param str permission: The string which dictates the read, create, - write, or delete permissions - :return: A FileSasPermissions object - :rtype: ~azure.storage.fileshare.FileSasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - - parsed = cls(p_read, p_create, p_write, p_delete) - - return parsed - - -class ShareSasPermissions(object): - """ShareSasPermissions class to be used to be used with - generating shared access signature and access policy operations. - - :param bool read: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :param bool write: - For any file in the share, create or write content, properties or metadata. - Resize the file. Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. - :param bool delete: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :param bool list: - List files and directories in the share. - """ - def __init__(self, read=False, write=False, delete=False, list=False): # pylint: disable=redefined-builtin - self.read = read - self.write = write - self.delete = delete - self.list = list - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ShareSasPermissions from a string. - - To specify read, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, write, - delete, or list permissions - :return: A ShareSasPermissions object - :rtype: ~azure.storage.fileshare.ShareSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - - parsed = cls(p_read, p_write, p_delete, p_list) - - return parsed - -class NTFSAttributes(object): - """ - Valid set of attributes to set for file or directory. - To set attribute for directory, 'Directory' should always be enabled except setting 'None' for directory. 
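ShareSasPermissions pairs with the AccessPolicy model shown earlier when publishing a stored access policy on a share. A hedged sketch; the import path, credentials and the set_share_access_policy helper are assumptions based on the retained share client::

    from datetime import datetime, timedelta, timezone
    from azure.multiapi.storagev2.fileshare.v2021_06_08 import (
        ShareClient, AccessPolicy, ShareSasPermissions,
    )

    share = ShareClient(
        account_url="https://myaccount.file.core.windows.net",   # placeholder
        share_name="myshare",
        credential="<account-key>",
    )

    policy = AccessPolicy(
        permission=ShareSasPermissions(read=True, list=True),
        start=datetime.now(timezone.utc),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    )
    share.set_share_access_policy({"read-only": policy})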
- - :ivar bool read_only: - Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE - :ivar bool hidden: - Enable/disable 'Hidden' attribute for DIRECTORY or FILE - :ivar bool system: - Enable/disable 'System' attribute for DIRECTORY or FILE - :ivar bool none: - Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY - :ivar bool directory: - Enable/disable 'Directory' attribute for DIRECTORY - :ivar bool archive: - Enable/disable 'Archive' attribute for DIRECTORY or FILE - :ivar bool temporary: - Enable/disable 'Temporary' attribute for FILE - :ivar bool offline: - Enable/disable 'Offline' attribute for DIRECTORY or FILE - :ivar bool not_content_indexed: - Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE - :ivar bool no_scrub_data: - Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE - """ - def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False, - temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False): - - self.read_only = read_only - self.hidden = hidden - self.system = system - self.none = none - self.directory = directory - self.archive = archive - self.temporary = temporary - self.offline = offline - self.not_content_indexed = not_content_indexed - self.no_scrub_data = no_scrub_data - self._str = (('ReadOnly|' if self.read_only else '') + - ('Hidden|' if self.hidden else '') + - ('System|' if self.system else '') + - ('None|' if self.none else '') + - ('Directory|' if self.directory else '') + - ('Archive|' if self.archive else '') + - ('Temporary|' if self.temporary else '') + - ('Offline|' if self.offline else '') + - ('NotContentIndexed|' if self.not_content_indexed else '') + - ('NoScrubData|' if self.no_scrub_data else '')) - - def __str__(self): - concatenated_params = self._str - return concatenated_params.strip('|') - - @classmethod - def from_string(cls, string): - """Create a NTFSAttributes from a string. - - To specify permissions you can pass in a string with the - desired permissions, e.g. "ReadOnly|Hidden|System" - - :param str string: The string which dictates the permissions. - :return: A NTFSAttributes object - :rtype: ~azure.storage.fileshare.NTFSAttributes - """ - read_only = "ReadOnly" in string - hidden = "Hidden" in string - system = "System" in string - none = "None" in string - directory = "Directory" in string - archive = "Archive" in string - temporary = "Temporary" in string - offline = "Offline" in string - not_content_indexed = "NotContentIndexed" in string - no_scrub_data = "NoScrubData" in string - - parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed, - no_scrub_data) - parsed._str = string # pylint: disable = protected-access - return parsed - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. 
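A tiny sketch of the attribute-string round trip NTFSAttributes implements above; only the import path is an assumption::

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import NTFSAttributes

    attrs = NTFSAttributes.from_string("ReadOnly|Hidden|Archive")
    assert attrs.read_only and attrs.hidden and attrs.archive and not attrs.system
    assert str(attrs) == "ReadOnly|Hidden|Archive"

    # Directories normally carry the 'Directory' flag, as the docstring notes.
    assert str(NTFSAttributes(directory=True, archive=True)) == "Directory|Archive"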
- """ - return { - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'protocol': ShareProtocolSettings._from_generated(generated.protocol), # pylint: disable=protected-access - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_parser.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_parser.py deleted file mode 100644 index db7cab5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_parser.py +++ /dev/null @@ -1,42 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import datetime, timedelta - -_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time' -_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else' \ - 'please use file_permission_key' - - -def _get_file_permission(file_permission, file_permission_key, default_permission): - # if file_permission and file_permission_key are both empty, then use the default_permission - # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used - if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024: - raise ValueError(_FILE_PERMISSION_TOO_LONG) - - if not file_permission: - if not file_permission_key: - return default_permission - return None - - if not file_permission_key: - return file_permission - - raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS) - - -def _parse_datetime_from_str(string_datetime): - if not string_datetime: - return None - dt, _, us = string_datetime.partition(".") - dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") - us = int(us[:-2]) # microseconds - datetime_obj = dt + timedelta(microseconds=us) - return datetime_obj - - -def _datetime_to_str(datetime_obj): - return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z' diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_serialize.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_serialize.py deleted file mode 100644 index 9a050da..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_serialize.py +++ /dev/null @@ -1,119 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from azure.core import MatchConditions - -from ._parser import _datetime_to_str, _get_file_permission -from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08', - '2020-06-12', - '2020-08-04', - '2020-10-02' -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - # TODO: extract this method to shared folder also add some comments, so that share, datalake and blob can use it. - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if etag_param in kwargs: - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_access_conditions(lease): - # type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None] - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_smb_properties(kwargs): - # type: (Dict[str, Any]) -> Dict[str, Any] - ignore_read_only = kwargs.pop('ignore_read_only', None) - set_archive_attribute = kwargs.pop('set_archive_attribute', None) - file_permission = kwargs.pop('file_permission', None) - file_permission_key = kwargs.pop('permission_key', None) - file_attributes = kwargs.pop('file_attributes', None) - file_creation_time = kwargs.pop('file_creation_time', None) or "" - file_last_write_time = kwargs.pop('file_last_write_time', None) or "" - - file_permission_copy_mode = None - file_permission = _get_file_permission(file_permission, file_permission_key, None) - - if file_permission: - if file_permission.lower() == "source": - file_permission = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - elif file_permission_key: - if file_permission_key.lower() == "source": - file_permission_key = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - return { - 'file_permission': file_permission, - 
'file_permission_key': file_permission_key, - 'copy_file_smb_info': CopyFileSmbInfo( - file_permission_copy_mode=file_permission_copy_mode, - ignore_read_only=ignore_read_only, - file_attributes=file_attributes, - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - set_archive_attribute=set_archive_attribute - ) - - } - - -def get_api_version(kwargs): - # type: (Dict[str, Any]) -> str - api_version = kwargs.get('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or _SUPPORTED_API_VERSIONS[-1] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_client.py deleted file mode 100644 index a0b317b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_client.py +++ /dev/null @@ -1,909 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from ._generated import AzureFileStorage -from ._generated.models import ( - SignedIdentifier, - DeleteSnapshotsOptionType, - SharePermission) -from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission -from ._serialize import get_api_version, get_access_conditions -from ._directory_client import ShareDirectoryClient -from ._file_client import ShareFileClient -from ._lease import ShareLeaseClient -from ._models import ShareProtocols - - -if TYPE_CHECKING: - from ._models import ShareProperties, AccessPolicy - - -class ShareClient(StorageAccountHostsMixin): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. 
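``get_api_version`` above validates an optional ``api_version`` keyword against ``_SUPPORTED_API_VERSIONS`` and defaults to the newest entry. A minimal sketch of the same selection rule (version list truncated for brevity):

.. code-block:: python

    SUPPORTED_API_VERSIONS = ["2019-02-02", "2019-07-07", "2020-08-04", "2020-10-02"]

    def pick_api_version(requested=None):
        # Reject versions this client build does not ship; default to the newest one.
        if requested and requested not in SUPPORTED_API_VERSIONS:
            raise ValueError(
                "Unsupported API version '{}'. Please select from:\n{}".format(
                    requested, "\n".join(SUPPORTED_API_VERSIONS)))
        return requested or SUPPORTED_API_VERSIONS[-1]

    assert pick_api_version() == "2020-10-02"
    assert pick_api_version("2019-07-07") == "2019-07-07"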
This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @classmethod - def from_share_url(cls, share_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """ - :param str share_url: The full URI to the share. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A share client. 
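The ``ShareClient`` constructor above accepts an account URL, a share name, an optional snapshot, and either a SAS token or an account key. A hedged construction sketch; the account, share, and credential values are placeholders, and the import path assumes one of the fileshare versions retained in 1.0.0:

.. code-block:: python

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareClient

    share = ShareClient(
        account_url="https://<my-account>.file.core.windows.net",
        share_name="myshare",
        credential="<account-key-or-sas-token>",
        api_version="2019-07-07",  # optional; must be a version this client build supports
    )
    print(share.url)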
- :rtype: ~azure.storage.fileshare.ShareClient - """ - try: - if not share_url.lower().startswith('http'): - share_url = "https://" + share_url - except AttributeError: - raise ValueError("Share URL must be a string.") - parsed_url = urlparse(share_url.rstrip('/')) - if not (parsed_url.path and parsed_url.netloc): - raise ValueError("Invalid URL: {}".format(share_url)) - - share_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(share_path) > 1: - account_path = "/" + "/".join(share_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - - share_name = unquote(share_path[-1]) - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - if not share_name: - raise ValueError("Invalid URL. Please provide a URL with a valid share name") - return cls(account_url, share_name, path_snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """Create ShareClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str snapshot: - The optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_client_from_conn_string] - :end-before: [END create_share_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Gets the share client from connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. 
- :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode) - - @distributed_trace - def acquire_lease(self, **kwargs): - # type: (**Any) -> ShareLeaseClient - """Requests a new lease. - - If the share does not have an active lease, the Share - Service creates a lease on the share and returns a new lease. - - .. versionadded:: 12.5.0 - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword str lease_id: - Proposed lease ID, in a GUID string format. The Share Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START acquire_and_release_lease_on_share] - :end-before: [END acquire_and_release_lease_on_share] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a share. - """ - kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) - lease_id = kwargs.pop('lease_id', None) - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword protocols: - Protocols to enable on the share. Only one protocol can be enabled on the share. 
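``acquire_lease`` above wraps ``ShareLeaseClient``: it pops ``lease_duration`` and ``lease_id`` from the keywords, calls ``acquire``, and returns the lease object. A sketch of the usual acquire/release cycle, with the connection string and share name as placeholders and the same assumed import path as the earlier sketch:

.. code-block:: python

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareClient

    share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
    lease = share.acquire_lease(lease_duration=60)   # finite lease; -1 means infinite
    try:
        share.set_share_metadata({"owner": "batch-job"}, lease=lease)
    finally:
        lease.release()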
- :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 8 - :caption: Creates a file share. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - access_tier = kwargs.pop('access_tier', None) - timeout = kwargs.pop('timeout', None) - root_squash = kwargs.pop('root_squash', None) - protocols = kwargs.pop('protocols', None) - if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: - raise ValueError("The enabled protocol must be set to either SMB or NFS.") - if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: - raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - enabled_protocols=protocols, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 12 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword lease: - Required if the share has an active lease. 
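``create_share`` and ``create_snapshot`` above both return the normalized response headers as a dict (Etag, last-modified, and the snapshot ID for snapshots). A usage sketch with placeholder values:

.. code-block:: python

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareClient

    share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
    props = share.create_share(metadata={"env": "test"}, quota=1)  # quota in GB
    snapshot = share.create_snapshot()
    print(props["etag"], snapshot["snapshot"])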
Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 12 - :caption: Deletes the share and any snapshots. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - delete_snapshots=delete_include, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 12 - :caption: Gets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - props = self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace - def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 12 - :caption: Sets the share quota. 
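``get_share_properties`` deserializes the response into a ``ShareProperties`` model and stamps the share name and snapshot back onto it; ``set_share_quota`` resizes the share. A quick inspection sketch, placeholders as before:

.. code-block:: python

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareClient

    share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
    props = share.get_share_properties()
    print(props.name, props.quota, props.last_modified)
    share.set_share_quota(10)  # grow the share to 10 GB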
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=None, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_share_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Sets the share properties. - - .. versionadded:: 12.4.0 - - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', and 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :keyword int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_properties] - :end-before: [END set_share_properties] - :language: python - :dedent: 12 - :caption: Sets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - access_tier = kwargs.pop('access_tier', None) - quota = kwargs.pop('quota', None) - root_squash = kwargs.pop('root_squash', None) - if all(parameter is None for parameter in [access_tier, quota, root_squash]): - raise ValueError("set_share_properties should be called with at least one parameter.") - try: - return self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 12 - :caption: Sets the share metadata. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. 
The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - try: - return self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :return: The approximate size of the data (in bytes) stored on the share. - :rtype: int - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.share.get_statistics( - timeout=timeout, - lease_access_conditions=access_conditions, - **kwargs) - return stats.share_usage_bytes # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword list[str] include: - Include this parameter to specify one or more datasets to include in the response. - Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword bool include_extended_info: - If this is set to true, file id will be returned in listed results. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 12 - :caption: List directories and files in the share. 
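``set_share_access_policy`` caps the dictionary at five stored access policies and serializes each policy's start and expiry to ISO-8601 before the call. A sketch that installs one read-only policy and then walks the share contents; the identifier and times are illustrative and the import path is the same assumed vendored path as above:

.. code-block:: python

    from datetime import datetime, timedelta, timezone
    from azure.multiapi.storagev2.fileshare.v2021_06_08 import (
        AccessPolicy, ShareClient, ShareSasPermissions)

    share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")

    policy = AccessPolicy(
        permission=ShareSasPermissions(read=True),
        start=datetime.now(timezone.utc),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1))
    share.set_share_access_policy({"read-only": policy})

    print(share.get_share_stats(), "bytes in use")
    for item in share.list_directories_and_files():
        print(item["name"])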
- """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @staticmethod - def _create_permission_for_share_options(file_permission, # type: str - **kwargs): - options = { - 'share_permission': SharePermission(permission=file_permission), - 'cls': deserialize_permission_key, - 'timeout': kwargs.pop('timeout', None), - } - options.update(kwargs) - return options - - @distributed_trace - def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return self._client.share.create_permission(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace - def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - directory = self.get_directory_client(directory_name) - directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_service_client.py deleted file mode 100644 index 537a0e5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_service_client.py +++ /dev/null @@ -1,423 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, List, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.response_handlers import process_storage_error -from ._generated import AzureFileStorage -from ._generated.models import StorageServiceProperties -from ._share_client import ShareClient -from ._serialize import get_api_version -from ._models import ( - SharePropertiesPaged, - service_properties_deserialize, -) - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ( - ShareProperties, - Metrics, - CorsRule, - ShareProtocolSettings - ) - - -class ShareServiceClient(StorageAccountHostsMixin): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. - For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. 
- """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File Share service.") - - _, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ShareServiceClient - """Create ShareServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A File Share service client. - :rtype: ~azure.storage.fileshare.ShareServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client_from_conn_string] - :end-before: [END create_share_service_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Create the share service client with connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 8 - :caption: Get file share service properties. 
- """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - protocol=None, # type: Optional[ShareProtocolSettings], - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :param protocol: - Sets protocol settings - :type protocol: ~azure.storage.fileshare.ShareProtocolSettings - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. - """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - protocol=protocol - ) - try: - self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ShareProperties] - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 12 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace - def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 8 - :caption: Create a share in the file share service. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace - def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 12 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace - def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. 
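At the account level, ``list_shares`` lazily follows continuation tokens, with the ``include_*`` flags mapped onto the ``include=`` query parameter, while the service-level ``create_share`` and ``delete_share`` simply delegate to a ``ShareClient``. A sketch that creates, enumerates, and removes a share (placeholders as before):

.. code-block:: python

    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareServiceClient

    service = ShareServiceClient.from_connection_string("<connection-string>")

    service.create_share("scratch")
    for share in service.list_shares(name_starts_with="scratch", include_metadata=True):
        print(share.name, share.last_modified)
    service.delete_share("scratch", delete_snapshots=True)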
- - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - """ - share = self.get_share_client(deleted_share_name) - - try: - share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except HttpResponseError as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. - """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, - _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
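``sign_string`` in the shared helpers above is a plain HMAC-SHA256 over the canonical string-to-sign, keyed with the base64-decoded account key and returned base64-encoded. A standalone sketch with a made-up key:

.. code-block:: python

    import base64
    import hashlib
    import hmac

    def sign_string(key_b64, string_to_sign):
        # Decode the base64 account key, HMAC-SHA256 the string, re-encode as base64.
        key = base64.b64decode(key_b64)
        digest = hmac.new(key, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
        return base64.b64encode(digest).decode("utf-8")

    fake_key = base64.b64encode(b"not-a-real-account-key").decode("utf-8")
    print(sign_string(fake_key, "GET\n\n...canonicalized headers and resource..."))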
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client.py deleted file mode 100644 index 5e524b2..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client.py +++ /dev/null @@ -1,459 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client_async.py deleted file mode 100644 index 091c350..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client_async.py +++ /dev/null @@ -1,183 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. 
- It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/constants.py deleted file mode 100644 index 66f9a47..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/constants.py +++ /dev/null @@ -1,26 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -from .._generated import AzureFileStorage - - -X_MS_VERSION = AzureFileStorage(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. 
- if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. 
- wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. 
- :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. 
This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. 
- :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' + \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. 
- padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/models.py deleted file mode 100644 index 27cd236..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/models.py +++ /dev/null @@ -1,468 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
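# A sketch (illustrative placeholder values, not part of the original files) of
# the envelope built by _generate_encryption_data_dict above and later parsed
# back by _dict_to_encryption_data / decrypt_queue_message:
example_encryption_data = {
    "WrappedContentKey": {
        "KeyId": "my-kek-id",                    # from kek.get_kid()
        "EncryptedKey": "<base64 wrapped CEK>",  # from kek.wrap_key(cek)
        "Algorithm": "<key wrap algorithm>",     # from kek.get_key_wrap_algorithm()
    },
    "EncryptionAgent": {
        "Protocol": "1.0",                       # _ENCRYPTION_PROTOCOL_V1
        "EncryptionAlgorithm": "AES_CBC_256",
    },
    "ContentEncryptionIV": "<base64 16-byte IV>",
    "KeyWrappingMetadata": {"EncryptionLibrary": "Python <VERSION>"},
}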
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.fileshare.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.fileshare.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.fileshare.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. 
- - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies.py deleted file mode 100644 index 11fc984..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies.py +++ /dev/null @@ -1,608 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -import types -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): - """Is this method/status code retryable? (Based on whitelists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
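The retry classification above reduces to a small status-code rule; a simplified standalone restatement (an illustration, not the library function itself) is::

    # Simplified restatement of is_retry() above, for illustration only.
    def should_retry(status, used_secondary=False):
        if 300 <= status < 500:
            # 404 against the secondary endpoint and 408 timeouts are retryable
            return (status == 404 and used_secondary) or status == 408
        if status >= 500:
            # 5xx is retryable except 501 Not Implemented / 505 Version Not Supported
            return status not in (501, 505)
        return False

    assert should_retry(503)
    assert should_retry(408)
    assert not should_retry(501)
    assert not should_retry(404)
    assert should_retry(404, used_secondary=True)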
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - # We don't want to log the binary data of a file upload. - if isinstance(http_request.body, types.GeneratorType): - _LOGGER.debug("File upload") - else: - _LOGGER.debug(str(http_request.body)) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif response.http_response.headers.get("content-type", "").startswith("image"): - _LOGGER.debug("Body contains image data.") - else: - if response.context.options.get('stream', False): - _LOGGER.debug("Body is streamable") - else: - _LOGGER.debug(response.http_response.text()) - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
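The 'sig' scrubbing that StorageLoggingPolicy applies to copy-source headers can be seen in isolation in this small sketch; the URL is a made-up example::

    # Redact a SAS signature before logging, using the same take-apart/put-back-together
    # approach applied to 'x-ms-copy-source' above.
    from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse

    def redact_sas_signature(url):
        scheme, netloc, path, params, query, fragment = urlparse(url)
        parsed_qs = dict(parse_qsl(query))
        if 'sig' in parsed_qs:
            parsed_qs['sig'] = '*****'
        return urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))

    print(redact_sas_signature(
        "https://account.file.core.windows.net/share/dir/file.txt?sv=2020-10-02&sig=secret"))
    # https://account.file.core.windows.net/share/dir/file.txt?sv=2020-10-02&sig=%2A%2A%2A%2A%2A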
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the whitelist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. 
- - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
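The backoff sequence produced by those defaults (initial_backoff=15, increment_base=3, random_jitter_range=3) can be reproduced with a few lines; per the formula above it grows as roughly 15, 18, 24 seconds before jitter::

    import random

    def exponential_backoff(count, initial_backoff=15, increment_base=3, jitter=3):
        # Same formula as ExponentialRetry.get_backoff_time above.
        backoff = initial_backoff + (0 if count == 0 else increment_base ** count)
        low = backoff - jitter if backoff > jitter else 0
        return random.uniform(low, backoff + jitter)

    for count in range(3):
        print(round(exponential_backoff(count), 1))   # ~15, ~18, ~24 (each +/- 3s)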
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/request_handlers.py deleted file mode 100644 index 37354d7..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/request_handlers.py +++ /dev/null @@ -1,273 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
:
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/response_handlers.py deleted file mode 100644 index 1863949..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/response_handlers.py +++ /dev/null @@ -1,192 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
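For orientation, the multipart/mixed body that serialize_batch_body() above assembles for a single sub-request has roughly the following shape; the batch id and URL below are made-up examples::

    import uuid

    batch_id = str(uuid.uuid4())   # the delimiter embeds the caller-supplied batch id
    example_body = (
        "--batch_" + batch_id + "\r\n"
        "Content-Type: application/http\r\n"
        "Content-ID: 0\r\n"
        "Content-Transfer-Encoding: binary\r\n"
        "\r\n"
        "DELETE /container/blob-1 HTTP/1.1\r\n"   # verb, path and query, HTTP version
        "Content-Length: 0\r\n"
        "\r\n"
        "\r\n"
        "--batch_" + batch_id + "--\r\n"          # closing delimiter, trailing CRLF required
    )
    print(example_body)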
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. 
- if isinstance(storage_error, (PartialBatchErrorException, - ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type % from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error - if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error - - -def 
parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/shared_access_signature.py deleted file mode 100644 index 07aad5f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/shared_access_signature.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - 
QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION)) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads.py deleted file mode 100644 index 1b619df..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads.py +++ /dev/null @@ -1,602 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
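The string-to-sign that add_account_signature() above builds is just the newline-joined SAS fields; a sketch with made-up field values::

    # Field order matches the get_value_to_append() calls above; empty optional
    # fields still contribute a bare newline.
    string_to_sign = '\n'.join([
        "mystorageaccount",        # account name
        "rwl",                     # sp  - permissions
        "f",                       # ss  - services (file)
        "sco",                     # srt - resource types
        "",                        # st  - start (optional)
        "2021-01-02T00:00:00Z",    # se  - expiry
        "",                        # sip - IP range (optional)
        "https",                   # spr - protocol
        "2020-10-02",              # sv  - x-ms-version
    ]) + '\n'
    # sign_string() then computes an HMAC-SHA256 over this string with the account
    # key to produce 'sig', and get_token() URL-encodes each value it emits.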
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in 
islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def 
_upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
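# A stripped-down sketch of the bounded-parallelism loop used by upload_data_chunks
# and _parallel_uploads earlier in this file: seed the pool with max_concurrency
# chunk uploads, then submit one new chunk for every completed future so that at
# most max_concurrency uploads are ever in flight. `upload_one` is a stand-in for
# uploader.process_chunk; the chunk data here is made up.
from concurrent import futures
from itertools import islice


def upload_one(chunk):
    offset, data = chunk
    return offset, len(data)  # pretend upload; return something sortable per range


def upload_all(chunks, max_concurrency=4):
    chunks = iter(chunks)
    range_ids = []
    with futures.ThreadPoolExecutor(max_concurrency) as executor:
        running = {executor.submit(upload_one, c) for c in islice(chunks, max_concurrency)}
        while running:
            # Wait for at least one upload to finish, then backfill the pool.
            done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
            range_ids.extend(f.result() for f in done)
            for _ in done:
                try:
                    running.add(executor.submit(upload_one, next(chunks)))
                except StopIteration:
                    break
    return sorted(range_ids)


print(upload_all((offset, b'x' * 4) for offset in range(0, 40, 4)))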
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. 
- if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
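# Behavioural sketch of the IterStreamer.read helper defined above: pull byte
# chunks from a generator until `size` bytes are available and keep the surplus
# as `leftover` for the next call. The class name and sample data are invented
# for illustration; the vendored IterStreamer adds encoding and iterator plumbing.
class GeneratorReader(object):
    def __init__(self, generator):
        self.iterator = iter(generator)
        self.leftover = b''

    def read(self, size):
        data, count = self.leftover, len(self.leftover)
        for chunk in self.iterator:
            data += chunk
            count += len(chunk)
            if count >= size:
                break
        self.leftover = data[size:]
        return data[:size]


reader = GeneratorReader(iter([b'hello ', b'world', b'!!']))
print(reader.read(4), reader.read(4), reader.read(100))  # b'hell' b'o wo' b'rld!!'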
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared_access_signature.py deleted file mode 100644 index 20dad95..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared_access_signature.py +++ /dev/null @@ -1,491 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, List, TYPE_CHECKING -) - -from ._shared import sign_string -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants -from ._shared.parser import _str - -if TYPE_CHECKING: - from datetime import datetime - from .import ( - ResourceTypes, - AccountSasPermissions, - ShareSasPermissions, - FileSasPermissions - ) - -class FileSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating file and share access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. 
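# Sketch of the block-splitting arithmetic shared by the sync and async
# get_substream_blocks implementations above: every block is chunk_size bytes
# except (possibly) the last, and each block is addressed by its byte offset.
# Values are made up; the real method yields (offset, SubStream) pairs rather
# than (offset, length) pairs.
from math import ceil


def block_ranges(total_size, chunk_size):
    blocks = int(ceil(total_size / float(chunk_size)))
    last = chunk_size if total_size % chunk_size == 0 else total_size % chunk_size
    for i in range(blocks):
        yield i * chunk_size, last if i == blocks - 1 else chunk_size


print(list(block_ranges(10, 4)))  # [(0, 4), (4, 4), (8, 2)]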
- ''' - super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, policy_id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
- ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _str(directory_name) if directory_name is not None else None - resource_path += '/' + _str(file_name) if file_name is not None else None - - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. 
- :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, share_name) - - return sas.get_token() - - -class _FileSharedAccessHelper(_SharedAccessHelper): - - def add_resource_signature(self, account_name, account_key, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/file/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. - string_to_sign = \ - (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for the file service. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param ~azure.storage.fileshare.AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
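# Sketch of the string-to-sign assembled by _FileSharedAccessHelper.add_resource_signature
# above. Unlike the account SAS, a file/share SAS signs a canonicalized resource path
# ('/file/' + account name + path) plus the response-header overrides. The short field
# names (sp, st, se, si, sip, spr, sv, rscc, ...) are the usual query-string constants
# and are assumed here; signing itself is the same HMAC-SHA256 step sketched earlier.
def file_sas_string_to_sign(account_name, path, fields):
    if not path.startswith('/'):
        path = '/' + path
    canonicalized_resource = '/file/' + account_name + path + '\n'

    def part(names):
        return ''.join((fields.get(n) or '') + '\n' for n in names)

    string_to_sign = (part(['sp', 'st', 'se']) + canonicalized_resource +
                      part(['si', 'sip', 'spr', 'sv',
                            'rscc', 'rscd', 'rsce', 'rscl', 'rsct']))
    # The helper drops only the single trailing newline before signing.
    return string_to_sign[:-1] if string_to_sign.endswith('\n') else string_to_sign


print(repr(file_sas_string_to_sign('myaccount', 'myshare/dir/report.csv', {
    'sp': 'r', 'se': '2021-01-01T00:00:00Z', 'spr': 'https', 'sv': '2020-10-02'})))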
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 8 - :caption: Generate a sas token. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(fileshare=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_share_sas( - account_name, # type: str - share_name, # type: str - account_key, # type: str - permission=None, # type: Optional[Union[ShareSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for a share. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ShareSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, create, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. 
If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - return sas.generate_share( - share_name=share_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_file_sas( - account_name, # type: str - share_name, # type: str - file_path, # type: List[str] - account_key, # type: str - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param file_path: - The file path represented as a list of path segments, including the file name. - :type file_path: List[str] - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.FileSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered read, write, delete, list. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - if len(file_path) > 1: - dir_path = '/'.join(file_path[:-1]) - else: - dir_path = None # type: ignore - return sas.generate_file( # type: ignore - share_name=share_name, - directory_name=dir_path, - file_name=file_path[-1], - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_version.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_version.py deleted file mode 100644 index c9d0e60..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
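# Hypothetical call of the module-level generate_file_sas helper defined above.
# Account name, key, share and path are made-up values. The import path matches
# the v2020_10_02 package shown in this diff and assumes it re-exports the helper
# from its __init__, as the upstream azure-storage-fileshare SDK does (import from
# ._shared_access_signature directly if it does not); the retained fileshare
# versions expose an equivalent helper. Note that file_path is a list of path
# segments whose last element is the file name.
from datetime import datetime, timedelta

from azure.multiapi.storagev2.fileshare.v2020_10_02 import generate_file_sas

sas_token = generate_file_sas(
    account_name='myaccount',
    share_name='myshare',
    file_path=['folder', 'subfolder', 'report.csv'],
    account_key='<base64-encoded account key>',
    permission='r',
    expiry=datetime.utcnow() + timedelta(hours=1),
)
file_url = ('https://myaccount.file.core.windows.net/'
            'myshare/folder/subfolder/report.csv?' + sas_token)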
-# -------------------------------------------------------------------------- - -VERSION = "12.6.0" diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/__init__.py deleted file mode 100644 index 73393b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._file_client_async import ShareFileClient -from ._directory_client_async import ShareDirectoryClient -from ._share_client_async import ShareClient -from ._share_service_client_async import ShareServiceClient -from ._lease_async import ShareLeaseClient - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', -] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_directory_client_async.py deleted file mode 100644 index ae7767d..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_directory_client_async.py +++ /dev/null @@ -1,606 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _get_file_permission, _datetime_to_str -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_directory_properties -from .._serialize import get_api_version -from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase -from ._file_client_async import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes - from .._generated.models import HandleItem - - -class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. 
- - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareDirectoryClient, self).__init__( - account_url, - share_name=share_name, - directory_path=directory_path, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._loop = loop - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param str file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 16 - :caption: Gets the subdirectory client. - """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - @distributed_trace_async - async def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 16 - :caption: Creates a directory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 16 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - await self._client.directory.delete(timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], Any) -> AsyncItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword list[str] include: - Include this parameter to specify one or more datasets to include in the response. - Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword bool include_extended_info: - If this is set to true, file id will be returned in listed results. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. 
- - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 16 - :caption: List directories and files. - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> AsyncItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace_async - async def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace_async - async def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. 
- Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 16 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace_async - async def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 16 - :caption: Delete a subdirectory. 
- """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace_async - async def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 16 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace_async - async def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 16 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_download_async.py deleted file mode 100644 index 971f12e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_download_async.py +++ /dev/null @@ -1,492 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from typing import AsyncIterator -from azure.core.exceptions import HttpResponseError, ResourceModifiedError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - if response.properties.etag != self.etag: - raise ResourceModifiedError(message="The file has been modified while downloading.") - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - 
return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += await self._iter_downloader.yield_chunk(chunk) - except StopIteration: - self._complete = True - # it's likely that there some data left in self._current_content - if self._current_content: - return self._current_content - raise StopAsyncIteration("Download complete") - - return self._get_chunk_data() - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar: str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. - :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. - """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - self._etag = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. 
- self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. 
- if response.properties.size == self.size: - self._download_complete = True - self._etag = response.properties.etag - return response - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - etag=self._etag, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size - ) - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." 
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - etag=self._etag, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_file_client_async.py deleted file mode 100644 index f4ee50f..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_file_client_async.py +++ /dev/null @@ -1,1205 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
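# A minimal sketch of consuming the async StorageStreamDownloader defined above
# through ShareFileClient.download_file; account URL, share, path and credential
# are placeholders.
import asyncio

from azure.multiapi.storagev2.fileshare.v2020_10_02.aio import ShareFileClient


async def main():
    file_client = ShareFileClient(
        account_url="https://myaccount.file.core.windows.net",
        share_name="myshare",
        file_path="mydir/report.txt",
        credential="<account-key-or-sas>",
    )
    async with file_client:
        # Read the whole file into memory ...
        downloader = await file_client.download_file()
        data = await downloader.readall()
        print(len(data))
        # ... or stream it chunk by chunk without buffering everything.
        downloader = await file_client.download_file()
        async for chunk in downloader.chunks():
            print(len(chunk))


asyncio.run(main())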
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method, too-many-public-methods -import functools -import time -from io import BytesIO -from typing import Optional, Union, IO, List, Tuple, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _datetime_to_str, _get_file_permission -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.models import FileHTTPHeaders -from .._shared.policies_async import ExponentialRetry -from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.request_handlers import add_metadata_headers, get_length -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result -from .._serialize import get_access_conditions, get_smb_properties, get_api_version -from .._file_client import ShareFileClient as ShareFileClientBase -from ._models import HandlesPaged -from ._lease_async import ShareLeaseClient -from ._download_async import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes - from .._generated.models import HandleItem - - -async def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs -): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = await client.create_file( - size, content_settings=content_settings, metadata=metadata, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - timeout=timeout, - **kwargs - ) - if size == 0: - return response - - responses = await upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except HttpResponseError as error: - process_storage_error(error) - - -class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. - :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. 
- :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - - def __init__( # type: ignore - self, - account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareFileClient, self).__init__( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, - credential=credential, loop=loop, **kwargs - ) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def acquire_lease(self, lease_id=None, **kwargs): - # type: (Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the file does not have an active lease, the File - Service creates a lease on the blob and returns a new lease. - - :param str lease_id: - Proposed lease ID, in a GUID string format. The File Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - kwargs['lease_duration'] = -1 - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(**kwargs) - return lease - - @distributed_trace_async - async def create_file( # type: ignore - self, - size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. 
- file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 16 - :caption: Create a file. 
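# A minimal sketch of the async create_file/upload_file flow documented above;
# account URL, share, path and credential are placeholder assumptions.
import asyncio

from azure.multiapi.storagev2.fileshare.v2020_10_02.aio import ShareFileClient


async def main():
    file_client = ShareFileClient(
        account_url="https://myaccount.file.core.windows.net",
        share_name="myshare",
        file_path="mydir/notes.txt",
        credential="<account-key-or-sas>",
    )
    async with file_client:
        # create_file only allocates an empty file of the given size;
        # upload_file creates the file and writes its content in ranges.
        await file_client.create_file(size=1024)
        await file_client.upload_file(b"small payload", metadata={"source": "sketch"})


asyncio.run(main())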
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return await self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword str encoding: - Defaults to UTF-8. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 16 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, "read"): - stream = data - elif hasattr(data, "__iter__"): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return await _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs - ) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. 
This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 16 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return await self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id["copy_id"] - except TypeError: - pass - try: - await self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def download_file( - self, - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs - ): - # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the file into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
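# A minimal sketch of a server-side copy via start_copy_from_url as documented
# above. The source URL (which must be readable, e.g. through a SAS) and the
# destination details are placeholder assumptions.
import asyncio

from azure.multiapi.storagev2.fileshare.v2020_10_02.aio import ShareFileClient

SOURCE_URL = "https://myaccount.file.core.windows.net/myshare/mydir/report.txt?<sas>"


async def main():
    dest = ShareFileClient(
        account_url="https://myaccount.file.core.windows.net",
        share_name="myshare",
        file_path="backup/report.txt",
        credential="<account-key-or-sas>",
    )
    async with dest:
        copy = await dest.start_copy_from_url(SOURCE_URL)
        print(copy["copy_status"])                # same-account copies usually finish synchronously
        props = await dest.get_file_properties()  # poll here while the status is 'pending'
        print(props.copy.status, props.size)


asyncio.run(main())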
- :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 16 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - downloader = StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs - ) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 16 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = await self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = "/".join(self.file_path) - return file_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop("size", None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.file.set_metadata( # type: ignore - metadata=metadata, lease_access_conditions=access_conditions, - timeout=timeout, cls=return_response_headers, headers=headers, **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range( # type: ignore - self, - data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. 
This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return await self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. 
- """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.file.upload_range_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A list of valid ranges. - :rtype: List[dict[str, int]] - """ - options = self._get_ranges_options( - offset=offset, - length=length, - **kwargs) - try: - ranges = await self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] - - @distributed_trace_async - async def get_ranges_diff( # type: ignore - self, - previous_sharesnapshot, # type: Union[str, Dict[str, Any]] - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - .. versionadded:: 12.6.0 - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :param str previous_sharesnapshot: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous file snapshot to be compared - against a more recent snapshot or the current file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. - The first element are filled file ranges, the 2nd element is cleared file ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_ranges_options( - offset=offset, - length=length, - previous_sharesnapshot=previous_sharesnapshot, - **kwargs) - try: - ranges = await self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_file_ranges_result(ranges) - - @distributed_trace_async - async def clear_range( # type: ignore - self, - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. 
- :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = "bytes={0}-{1}".format(offset, end_range) - try: - return await self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - optionalbody=None, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> AsyncItemPaged - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop("results_per_page", None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. 
- :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_lease_async.py deleted file mode 100644 index 0d99845..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_lease_async.py +++ /dev/null @@ -1,228 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
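For reference, a minimal usage sketch of the async ShareFileClient operations documented above (upload, download, range queries); it is not part of the deleted sources, assumes azure-storage-file-share 12.x, and uses placeholder connection-string, share, and file names:

import asyncio
from azure.storage.fileshare.aio import ShareFileClient

async def main():
    # Placeholders: substitute a real connection string, share name and file path.
    file_client = ShareFileClient.from_connection_string(
        conn_str="<connection-string>",
        share_name="myshare",
        file_path="mydir/myfile.txt")
    async with file_client:
        await file_client.upload_file(b"hello world")      # create the file and upload content
        ranges = await file_client.get_ranges()            # e.g. [{'start': 0, 'end': 10}]
        downloader = await file_client.download_file()
        content = await downloader.readall()
        print(ranges, content)

asyncio.run(main())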
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._generated.aio.operations import FileOperations, ShareOperations -from .._lease import ShareLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - ShareClient = TypeVar("ShareClient") - - -class ShareLeaseClient(LeaseClientBase): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareClient or ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file or share to lease. - :type client: ~azure.storage.fileshare.ShareFileClient or - ~azure.storage.fileshare.ShareClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, **kwargs): - # type: (**Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file or share for write and delete operations. If the file or share does not have an active lease, - the File or Share service creates a lease on the file or share and returns a new lease ID. - If the file or share already has an active lease, you can only request a new lease - using the active lease ID. - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be - between 15 and 60 seconds. A share lease duration cannot be changed - using renew or change. Default is -1 (infinite share lease). - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - try: - lease_duration = kwargs.pop('lease_duration', -1) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the share lease. - - The share lease can be renewed if the lease ID specified in the - lease client matches that associated with the share. Note that - the lease may be renewed even if it has expired as long as the share - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - if isinstance(self._client, FileOperations): - raise TypeError("Lease renewal operations are only valid for ShareClient.") - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - sharesnapshot=self._snapshot, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the share or file. Releasing the lease allows another client to immediately acquire - the lease for the share or file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File or Share service raises an error - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, **kwargs): - # type: (Any) -> int - """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int lease_break_period: - This is the proposed duration of seconds that the share lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the share lease. If longer, the time remaining on the share lease is used. - A new share lease will not be available before the break period has - expired, but the share lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration share lease breaks after the remaining share lease - period elapses, and an infinite share lease breaks immediately. - - .. versionadded:: 12.5.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - try: - lease_break_period = kwargs.pop('lease_break_period', None) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - if isinstance(self._client, ShareOperations): - kwargs['break_period'] = lease_break_period - if isinstance(self._client, FileOperations) and lease_break_period: - raise TypeError("Setting a lease break period is only applicable to Share leases.") - - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_models.py deleted file mode 100644 index e81133c..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_models.py +++ /dev/null @@ -1,178 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
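A hedged sketch of how the async ShareLeaseClient above is typically driven through ShareClient.acquire_lease; the connection string and share name are placeholders and error handling is omitted:

import asyncio
from azure.storage.fileshare.aio import ShareClient

async def main():
    share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
    async with share:
        # Acquire a finite share lease (15-60 seconds), then pass it to lease-scoped calls.
        lease = await share.acquire_lease(lease_duration=30)
        try:
            await share.set_share_metadata({"env": "test"}, lease=lease)
        finally:
            await lease.release()

asyncio.run(main())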
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator -from azure.core.exceptions import HttpResponseError - -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error -from .._generated.models import DirectoryItem -from .._models import Handle, ShareProperties, DirectoryProperties, FileProperties - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class SharePropertiesPaged(AsyncPageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class HandlesPaged(AsyncPageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". 
- :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryPropertiesPaged(AsyncPageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access - self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access - return self._response.next_marker or None, self.current_page diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_client_async.py deleted file mode 100644 index f8ded64..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_client_async.py +++ /dev/null @@ -1,756 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.pipeline import AsyncPipeline -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from .._generated.aio import AzureFileStorage -from .._generated.models import ( - SignedIdentifier, - DeleteSnapshotsOptionType) -from .._deserialize import deserialize_share_properties, deserialize_permission -from .._serialize import get_api_version, get_access_conditions -from .._share_client import ShareClient as ShareClientBase -from ._directory_client_async import ShareDirectoryClient -from ._file_client_async import ShareFileClient -from ..aio._lease_async import ShareLeaseClient -from .._models import ShareProtocols - -if TYPE_CHECKING: - from .._models import ShareProperties, AccessPolicy - - -class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareClient, self).__init__( - account_url, - share_name=share_name, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._loop = loop - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - @distributed_trace_async() - async def acquire_lease(self, **kwargs): - # type: (**Any) -> ShareLeaseClient - """Requests a new lease. - - If the share does not have an active lease, the Share - Service creates a lease on the share and returns a new lease. - - .. versionadded:: 12.5.0 - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword str lease_id: - Proposed lease ID, in a GUID string format. The Share Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START acquire_lease_on_share] - :end-before: [END acquire_lease_on_share] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a share. 
- """ - kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) - lease_id = kwargs.pop('lease_id', None) - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(**kwargs) - return lease - - @distributed_trace_async - async def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword protocols: - Protocols to enable on the share. Only one protocol can be enabled on the share. - :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 12 - :caption: Creates a file share. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - access_tier = kwargs.pop('access_tier', None) - timeout = kwargs.pop('timeout', None) - root_squash = kwargs.pop('root_squash', None) - protocols = kwargs.pop('protocols', None) - if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: - raise ValueError("The enabled protocol must be set to either SMB or NFS.") - if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: - raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return await self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - enabled_protocols=protocols, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 16 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 16 - :caption: Deletes the share and any snapshots. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - await self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - delete_snapshots=delete_include, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world_async.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 16 - :caption: Gets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - props = await self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace_async - async def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. 
- - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 16 - :caption: Sets the share quota. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=None, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - async def set_share_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Sets the share properties. - - .. versionadded:: 12.3.0 - - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', and 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :keyword int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash' - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_properties] - :end-before: [END set_share_properties] - :language: python - :dedent: 16 - :caption: Sets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - access_tier = kwargs.pop('access_tier', None) - quota = kwargs.pop('quota', None) - root_squash = kwargs.pop('root_squash', None) - if all(parameter is None for parameter in [access_tier, quota, root_squash]): - raise ValueError("set_share_properties should be called with at least one parameter.") - try: - return await self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. 
- - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 16 - :caption: Sets the share metadata. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - - try: - return await self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :return: The approximate size of the data (in bytes) stored on the share. - :rtype: int - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.share.get_statistics( - timeout=timeout, - lease_access_conditions=access_conditions, - **kwargs) - return stats.share_usage_bytes # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( # type: ignore - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword list[str] include: - Include this parameter to specify one or more datasets to include in the response. - Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword bool include_extended_info: - If this is set to true, file id will be returned in listed results. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 16 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @distributed_trace_async - async def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return await self._client.share.create_permission(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - await directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace_async - async def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
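The deleted async ``ShareClient`` module above covers snapshot, metadata, quota, access-policy and statistics operations on a single share. As a rough orientation only, here is a minimal sketch of that surface driven end to end; it is written against the upstream ``azure-storage-file-share`` aio package that these vendored modules mirror, and the connection string and share name are placeholders:

.. code-block:: python

    # Illustrative sketch only; not part of the vendored sources above.
    import asyncio
    from azure.storage.fileshare.aio import ShareClient

    async def main():
        share = ShareClient.from_connection_string(
            "<connection-string>", share_name="myshare")
        async with share:
            await share.create_share()
            # Metadata and quota (quota is expressed in GiB).
            await share.set_share_metadata({"team": "docs"})
            await share.set_share_quota(10)
            # Point-in-time snapshot; the returned dict carries a 'snapshot' ID.
            snapshot = await share.create_snapshot()
            # Approximate size of the data stored on the share, in bytes.
            usage = await share.get_share_stats()
            print(snapshot["snapshot"], usage)
            # Remove the share together with its snapshots.
            await share.delete_share(delete_snapshots=True)

    asyncio.run(main())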
- :rtype: None - """ - directory = self.get_directory_client(directory_name) - await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_service_client_async.py deleted file mode 100644 index dc9ef08..0000000 --- a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_service_client_async.py +++ /dev/null @@ -1,369 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import process_storage_error -from .._shared.policies_async import ExponentialRetry -from .._generated.aio import AzureFileStorage -from .._generated.models import StorageServiceProperties -from .._share_service_client import ShareServiceClient as ShareServiceClientBase -from .._serialize import get_api_version -from ._share_client_async import ShareClient -from ._models import SharePropertiesPaged -from .._models import service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import ResourceTypes, AccountSasPermissions - from .._models import ( - ShareProperties, - Metrics, - CorsRule, - ShareProtocolSettings, - ) - - -class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. - For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is '2019-07-07'. - Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_authentication_async.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareServiceClient, self).__init__( - account_url, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 12 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - protocol=None, # type: Optional[ShareProtocolSettings], - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :param protocol_settings: - Sets protocol settings - :type protocol: ~azure.storage.fileshare.ShareProtocolSettings - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. 
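The ``set_service_properties`` docstring above ends here; as a hedged illustration of how those analytics and CORS settings are typically assembled, the sketch below uses the ``Metrics``, ``RetentionPolicy`` and ``CorsRule`` models this package exports, with placeholder account details:

.. code-block:: python

    # Illustrative sketch only; account details are placeholders.
    import asyncio
    from azure.storage.fileshare import Metrics, RetentionPolicy, CorsRule
    from azure.storage.fileshare.aio import ShareServiceClient

    async def main():
        service = ShareServiceClient.from_connection_string("<connection-string>")
        async with service:
            hour_metrics = Metrics(
                enabled=True, include_apis=True,
                retention_policy=RetentionPolicy(enabled=True, days=7))
            minute_metrics = Metrics(enabled=False)
            cors_rule = CorsRule(["https://www.example.com"], ["GET"],
                                 max_age_in_seconds=600)
            await service.set_service_properties(
                hour_metrics=hour_metrics,
                minute_metrics=minute_metrics,
                cors=[cors_rule])
            # Unspecified elements (e.g. protocol settings) are left unchanged.
            props = await service.get_service_properties()
            print(props["hour_metrics"], props["cors"])

    asyncio.run(main())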
- """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - protocol=protocol - ) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs # type: Any - ): # type: (...) -> AsyncItemPaged - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 16 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace_async - async def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 12 - :caption: Create a share in the file share service. 
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace_async - async def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 16 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace_async - async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - """ - share = self.get_share_client(deleted_share_name) - try: - await share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except HttpResponseError as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. 
- """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/py.typed b/azure/multiapi/storagev2/fileshare/v2020_10_02/py.typed deleted file mode 100644 index e69de29..0000000 diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/__init__.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/__init__.py deleted file mode 100644 index af67e01..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/__init__.py +++ /dev/null @@ -1,82 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._version import VERSION -from ._file_client import ShareFileClient -from ._directory_client import ShareDirectoryClient -from ._share_client import ShareClient -from ._share_service_client import ShareServiceClient -from ._lease import ShareLeaseClient -from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas -from ._shared.policies import ExponentialRetry, LinearRetry -from ._shared.models import ( - LocationMode, - ResourceTypes, - AccountSasPermissions, - StorageErrorCode) -from ._models import ( - ShareProperties, - DirectoryProperties, - Handle, - FileProperties, - Metrics, - RetentionPolicy, - CorsRule, - ShareSmbSettings, - SmbMultichannel, - ShareProtocolSettings, - ShareProtocols, - AccessPolicy, - FileSasPermissions, - ShareSasPermissions, - ContentSettings, - NTFSAttributes) -from ._generated.models import ( - HandleItem, - ShareAccessTier -) -from ._generated.models import ( - ShareRootSquash -) - -__version__ = VERSION - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', - 'ExponentialRetry', - 'LinearRetry', - 'LocationMode', - 'ResourceTypes', - 'AccountSasPermissions', - 'StorageErrorCode', - 'Metrics', - 'RetentionPolicy', - 'CorsRule', - 'ShareSmbSettings', - 'ShareAccessTier', - 'SmbMultichannel', - 'ShareProtocolSettings', - 'AccessPolicy', - 'FileSasPermissions', - 'ShareSasPermissions', - 'ShareProtocols', - 'ShareProperties', - 'DirectoryProperties', - 'FileProperties', - 'ContentSettings', - 'Handle', - 'NTFSAttributes', - 'HandleItem', - 'ShareRootSquash', - 'generate_account_sas', - 'generate_share_sas', - 'generate_file_sas' -] diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_deserialize.py deleted file mode 100644 index 6839469..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_deserialize.py +++ /dev/null @@ -1,83 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=no-self-use -from typing import ( # pylint: disable=unused-import - Tuple, Dict, List, - TYPE_CHECKING -) - -from ._models import ShareProperties, DirectoryProperties, FileProperties -from ._shared.response_handlers import deserialize_metadata -from ._generated.models import ShareFileRangeList - - -def deserialize_share_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - share_properties = ShareProperties( - metadata=metadata, - **headers - ) - return share_properties - - -def deserialize_directory_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - directory_properties = DirectoryProperties( - metadata=metadata, - **headers - ) - return directory_properties - - -def deserialize_file_properties(response, obj, headers): - metadata = deserialize_metadata(response, obj, headers) - file_properties = FileProperties( - metadata=metadata, - **headers - ) - if 'Content-Range' in headers: - if 'x-ms-content-md5' in headers: - file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] - else: - file_properties.content_settings.content_md5 = None - return file_properties - - -def deserialize_file_stream(response, obj, headers): - file_properties = deserialize_file_properties(response, obj, headers) - obj.properties = file_properties - return response.http_response.location_mode, obj - - -def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission - ''' - - return obj.permission - - -def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument - ''' - Extracts out file permission key - ''' - - if response is None or headers is None: - return None - return headers.get('x-ms-file-permission-key', None) - - -def get_file_ranges_result(ranges): - # type: (ShareFileRangeList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - file_ranges = [] # type: ignore - clear_ranges = [] # type: List - if ranges.ranges: - file_ranges = [ - {'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] # type: ignore - if ranges.clear_ranges: - clear_ranges = [ - {'start': clear_range.start, 'end': clear_range.end} for clear_range in ranges.clear_ranges] - return file_ranges, clear_ranges diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_directory_client.py deleted file mode 100644 index 04668d9..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_directory_client.py +++ /dev/null @@ -1,838 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError -from azure.core.paging import ItemPaged -from azure.core.pipeline import Pipeline -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._deserialize import deserialize_directory_properties -from ._serialize import get_api_version, get_dest_access_conditions, get_rename_smb_properties -from ._file_client import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, DirectoryProperties, ContentSettings - from ._generated.models import HandleItem - - -class ShareDirectoryClient(StorageAccountHostsMixin): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.directory_path = directory_path - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @classmethod - def from_directory_url(cls, directory_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> ShareDirectoryClient - """Create a ShareDirectoryClient from a directory url. - - :param str directory_url: - The full URI to the directory. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - try: - if not directory_url.lower().startswith('http'): - directory_url = "https://" + directory_url - except AttributeError: - raise ValueError("Directory URL must be a string.") - parsed_url = urlparse(directory_url.rstrip('/')) - if not parsed_url.path and not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(directory_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - path_snapshot, _ = parse_query(parsed_url.query) - - share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') - share_name = unquote(share_name) - - directory_path = path_dir - snapshot = snapshot or path_snapshot - - return cls( - account_url=account_url, share_name=share_name, directory_path=directory_path, - credential=credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. 
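The constructor and ``from_directory_url`` logic above admit three equivalent ways of building a synchronous ``ShareDirectoryClient``; every URL, SAS token and connection string below is a placeholder:

.. code-block:: python

    # Illustrative sketch only; values are placeholders.
    from azure.storage.fileshare import ShareDirectoryClient

    # From an account URL plus explicit share name and directory path.
    directory = ShareDirectoryClient(
        account_url="https://<account>.file.core.windows.net",
        share_name="myshare",
        directory_path="reports/2024",
        credential="<sas-token-or-account-key>")

    # From a full directory URL (optionally carrying a SAS token).
    same_directory = ShareDirectoryClient.from_directory_url(
        "https://<account>.file.core.windows.net/myshare/reports/2024?<sas-token>")

    # From a connection string.
    also_same = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="reports/2024")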
- """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - directory_path = "" - if self.directory_path: - directory_path = "/" + quote(self.directory_path, safe='~') - return "{}://{}/{}{}{}".format( - self.scheme, - hostname, - quote(share_name), - directory_path, - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - directory_path, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareDirectoryClient - """Create ShareDirectoryClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str directory_path: - The directory path. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A directory client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs) - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 12 - :caption: Gets the subdirectory client. 
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode, **kwargs) - - @distributed_trace - def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 12 - :caption: Creates a directory. - """ - timeout = kwargs.pop('timeout', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 12 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - self._client.directory.delete(timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def rename_directory( - self, new_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> ShareDirectoryClient - """ - Rename the source directory. - - :param str new_name: - The new directory name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool overwrite: - A boolean value for if the destination file already exists, whether this request will - overwrite the file or not. If true, the rename will succeed and will overwrite the - destination file. If not provided or if false and the destination file does exist, the - request will not overwrite the destination file. If provided and the destination file - doesn't exist, the rename will succeed. - :keyword bool ignore_read_only: - A boolean value that specifies whether the ReadOnly attribute on a preexisting destination - file should be respected. If true, the rename will succeed, otherwise, a previous file at the - destination with the ReadOnly attribute set will cause the rename to fail. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory. 
This header - can be used if Permission size is <= 8KB, else file_permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. - A value of 'preserve' can be passed to preserve source permissions. - Note: Only one of the file_permission or file_permission_key should be specified. - :keyword str file_permission_key: - Key of the permission to be set for the directory. - Note: Only one of the file-permission or file-permission-key should be specified. - :keyword file_attributes: - The file system attributes for the directory. - :paramtype file_attributes:~azure.storage.fileshare.NTFSAttributes or str - :keyword file_creation_time: - Creation time for the directory. - :paramtype file_creation_time:~datetime.datetime or str - :keyword file_last_write_time: - Last write time for the file. - :paramtype file_last_write_time:~datetime.datetime or str - :keyword Dict[str,str] metadata: - A name-value pair to associate with a file storage object. - :keyword destination_lease: - Required if the destination file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str - :returns: The new Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - if not new_name: - raise ValueError("Please specify a new directory name.") - - new_name = new_name.strip('/') - new_path_and_query = new_name.split('?') - new_dir_path = new_path_and_query[0] - if len(new_path_and_query) == 2: - new_dir_sas = new_path_and_query[1] or self._query_str.strip('?') - else: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = ShareDirectoryClient( - '{}://{}'.format(self.scheme, self.primary_hostname), self.share_name, new_dir_path, - credential=new_dir_sas or self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function - ) - - kwargs.update(get_rename_smb_properties(kwargs)) - - timeout = kwargs.pop('timeout', None) - overwrite = kwargs.pop('overwrite', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - - destination_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None)) - - try: - new_directory_client._client.directory.rename( # pylint: disable=protected-access - self.url, - timeout=timeout, - replace_if_exists=overwrite, - destination_lease_access_conditions=destination_access_conditions, - headers=headers, - **kwargs) - - return new_directory_client - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], **Any) -> ItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword list[str] include: - Include this parameter to specify one or more datasets to include in the response. - Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. 
- - :keyword bool include_extended_info: - If this is set to true, file id will be returned in listed results. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 12 - :caption: List directories and files. - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> ItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. 
Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace - def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace - def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: True if the directory exists, False otherwise. 
- :rtype: bool - """ - try: - self._client.directory.get_properties(**kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace - def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_subdirectory( - self, directory_name, # type: str - **kwargs): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 12 - :caption: Create a subdirectory. 
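Pulling together the ``exists``, ``set_directory_metadata``, ``get_directory_properties`` and ``create_subdirectory`` operations documented above, a hedged sketch with placeholder connection details:

.. code-block:: python

    # Illustrative sketch only.
    from azure.storage.fileshare import ShareDirectoryClient

    directory = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="reports")

    if not directory.exists():
        directory.create_directory()

    # Replaces all existing metadata on the directory.
    directory.set_directory_metadata({"owner": "finance"})

    # Creates a child directory and returns a client scoped to it.
    archive = directory.create_subdirectory("archive")
    print(archive.directory_path)

    props = directory.get_directory_properties()
    print(props.last_modified, props.metadata)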
- """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace - def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 12 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace - def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 12 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace - def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 12 - :caption: Delete a file in a directory. 
- """ - file_client = self.get_file_client(file_name) - file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_download.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_download.py deleted file mode 100644 index a2db5aa..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_download.py +++ /dev/null @@ -1,554 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys -import threading -import warnings -from io import BytesIO -from typing import Iterator - -from azure.core.exceptions import HttpResponseError, ResourceModifiedError -from azure.core.tracing.common import with_current_context -from ._shared.encryption import decrypt_blob -from ._shared.request_handlers import validate_and_format_range_headers -from ._shared.response_handlers import process_storage_error, parse_length_from_content_range - - -def process_range_and_offset(start_range, end_range, length, encryption): - start_offset, end_offset = 0, 0 - if encryption.get("key") is not None or encryption.get("resolver") is not None: - if start_range is not None: - # Align the start of the range along a 16 byte block - start_offset = start_range % 16 - start_range -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start_range > 0: - start_offset += 16 - start_range -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end_range % 16) - end_range += end_offset - - return (start_range, end_range), (start_offset, end_offset) - - -def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = b"".join(list(data)) - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if content and encryption.get("key") is not None or encryption.get("resolver") is not None: - try: - return decrypt_blob( - encryption.get("required"), - encryption.get("key"), - encryption.get("resolver"), - content, - start_offset, - end_offset, - data.response.headers, - ) - except Exception as error: - raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) - return content - - -class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - client=None, - total_size=None, - chunk_size=None, - current_progress=None, - start_range=None, - end_range=None, - stream=None, - parallel=None, - validate_content=None, - encryption_options=None, - etag=None, - **kwargs - ): - self.client = client - self.etag = etag - # Information on the download range/chunk size - self.chunk_size = chunk_size - self.total_size = total_size - self.start_index = start_range - self.end_index = end_range - - # The destination that we will write to - self.stream = stream - self.stream_lock = threading.Lock() if parallel else None - self.progress_lock = threading.Lock() if parallel else None - - # For a parallel download, the stream is always seekable, so we note down the current position - # in order to seek to the right place when out-of-order chunks come in - 
self.stream_start = stream.tell() if parallel else None - - # Download progress so far - self.progress_total = current_progress - - # Encryption - self.encryption_options = encryption_options - - # Parameters for each get operation - self.validate_content = validate_content - self.request_options = kwargs - - def _calculate_range(self, chunk_start): - if chunk_start + self.chunk_size > self.end_index: - chunk_end = self.end_index - else: - chunk_end = chunk_start + self.chunk_size - return chunk_start, chunk_end - - def get_chunk_offsets(self): - index = self.start_index - while index < self.end_index: - yield index - index += self.chunk_size - - def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - self._write_to_stream(chunk_data, chunk_start) - self._update_progress(length) - - def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return self._download_chunk(chunk_start, chunk_end - 1) - - def _update_progress(self, length): - if self.progress_lock: - with self.progress_lock: # pylint: disable=not-context-manager - self.progress_total += length - else: - self.progress_total += length - - def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - with self.stream_lock: # pylint: disable=not-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - def _download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], download_range[1], check_content_md5=self.validate_content - ) - - try: - _, response = self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - if response.properties.etag != self.etag: - raise ResourceModifiedError(message="The file has been modified while downloading.") - - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _ChunkIterator(object): - """Iterator for chunks in the file download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - return self - - def __next__(self): - """Iterate through responses.""" - if self._complete: - raise StopIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = 
next(self._iter_chunks) - self._current_content += self._iter_downloader.yield_chunk(chunk) - except StopIteration as e: - self._complete = True - if self._current_content: - return self._current_content - raise e - - return self._get_chunk_data() - - next = __next__ # Python 2 compatibility. - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. - :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if specified, - otherwise the total size of the file. - """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - self._etag = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved.
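# A rough illustration of the effect, assuming the usual library defaults of about
# 32 MiB for max_single_get_size and 4 MiB for max_chunk_get_size: the first request
# fetches up to 32 MiB when validate_content is off, but only 4 MiB when it is on, so
# the service can return a transactional MD5 for that first chunk.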
- self._first_get_size = ( - self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size - ) - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - self._response = self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = "bytes {0}-{1}/{2}".format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - def __len__(self): - return self.size - - def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content - ) - - try: - location_mode, response = self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options - ) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. - self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options - ) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. 
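# The initial GET above covers at most self._first_get_size bytes. If that already
# equals the requested download size, the download is marked complete and
# chunks()/readinto() can serve the buffered content without creating a _ChunkDownloader.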
- if response.properties.size == self.size: - self._download_complete = True - self._etag = response.properties.etag - return response - - def chunks(self): - # type: () -> Iterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: Iterator[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the end range index unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - etag=self._etag, - **self._request_options - ) - return _ChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size) - - def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return self.readall() - - def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Text encoding used to decode the downloaded bytes. Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return self.readall() - - def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # The stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable."
- if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _ChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - etag=self._etag, - **self._request_options - ) - if parallel: - import concurrent.futures - with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor: - list(executor.map( - with_current_context(downloader.process_chunk), - downloader.get_chunk_offsets() - )) - else: - for chunk in downloader.get_chunk_offsets(): - downloader.process_chunk(chunk) - return self.size - - def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_file_client.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_file_client.py deleted file mode 100644 index 3892956..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_file_client.py +++ /dev/null @@ -1,1516 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, too-many-public-methods -import functools -import time -from io import BytesIO -from typing import ( # pylint: disable=unused-import - Optional, Union, IO, List, Dict, Any, Iterable, Tuple, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports -from azure.core.tracing.decorator import distributed_trace - -from ._generated import AzureFileStorage -from ._generated.models import FileHTTPHeaders -from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks -from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, get_length -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._shared.parser import _str -from ._parser import _get_file_permission, _datetime_to_str -from ._lease import ShareLeaseClient -from ._serialize import ( - get_access_conditions, - get_api_version, - get_dest_access_conditions, - get_rename_smb_properties, - get_smb_properties, - get_source_conditions, - get_source_access_conditions) -from ._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result -from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import -from ._download import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ShareProperties, ContentSettings, FileProperties, Handle - from ._generated.models import HandleItem - - -def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = client.create_file( - size, - content_settings=content_settings, - metadata=metadata, - timeout=timeout, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - **kwargs - ) - if size == 0: - return response - - responses = upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except HttpResponseError as error: - process_storage_error(error) - - -class ShareFileClient(StorageAccountHostsMixin): - """A client to interact with a specific file, although that file may not yet exist. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. 
- :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not (share_name and file_path): - raise ValueError("Please specify a share name and file name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self.file_path = file_path.split('/') - self.file_name = self.file_path[-1] - self.directory_path = "/".join(self.file_path[:-1]) - - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @classmethod - def from_file_url( - cls, file_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """A client to interact with a specific file, although that file may not yet exist. - - :param str file_url: The full URI to the file. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. 
The value can be a SAS token string, - an instance of an AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - try: - if not file_url.lower().startswith('http'): - file_url = "https://" + file_url - except AttributeError: - raise ValueError("File URL must be a string.") - parsed_url = urlparse(file_url.rstrip('/')) - - if not (parsed_url.netloc and parsed_url.path): - raise ValueError("Invalid URL: {}".format(file_url)) - account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query - - path_share, _, path_file = parsed_url.path.lstrip('/').partition('/') - path_snapshot, _ = parse_query(parsed_url.query) - snapshot = snapshot or path_snapshot - share_name = unquote(path_share) - file_path = '/'.join([unquote(p) for p in path_file.split('/')]) - return cls(account_url, share_name, file_path, snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - "/".join([quote(p, safe='~') for p in self.file_path]), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Create ShareFileClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str file_path: - The file path. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of an AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A File client. - :rtype: ~azure.storage.fileshare.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START create_file_client] - :end-before: [END create_file_client] - :language: python - :dedent: 12 - :caption: Creates the file client with connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs) - - @distributed_trace - def acquire_lease(self, lease_id=None, **kwargs): - # type: (Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the file does not have an active lease, the File - Service creates a lease on the file and returns a new lease. - - :param str lease_id: - Proposed lease ID, in a GUID string format. The File Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object.
- :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START acquire_and_release_lease_on_file] - :end-before: [END acquire_and_release_lease_on_file] - :language: python - :dedent: 12 - :caption: Acquiring a lease on a file. - """ - kwargs['lease_duration'] = -1 - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_file( # type: ignore - self, size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 12 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. 
- :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 12 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, 'read'): - stream = data - elif hasattr(data, '__iter__'): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs) - - @distributed_trace - def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. 
- Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 12 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id['copy_id'] - except TypeError: - pass - try: - self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def download_file( - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the file into - a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. 
versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.fileshare.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 12 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - return StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs) - - @distributed_trace - def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 12 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def rename_file( - self, new_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """ - Rename the source file. - - :param str new_name: - The new file name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool overwrite: - A boolean value for if the destination file already exists, whether this request will - overwrite the file or not. If true, the rename will succeed and will overwrite the - destination file. If not provided or if false and the destination file does exist, the - request will not overwrite the destination file. If provided and the destination file - doesn't exist, the rename will succeed. - :keyword bool ignore_read_only: - A boolean value that specifies whether the ReadOnly attribute on a preexisting destination - file should be respected. If true, the rename will succeed, otherwise, a previous file at the - destination with the ReadOnly attribute set will cause the rename to fail. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the file. This header - can be used if Permission size is <= 8KB, else file_permission_key shall be used. 
- If SDDL is specified as input, it must have owner, group and dacl. - A value of 'preserve' can be passed to preserve source permissions. - Note: Only one of the file_permission or file_permission_key should be specified. - :keyword str file_permission_key: - Key of the permission to be set for the file. - Note: Only one of the file-permission or file-permission-key should be specified. - :keyword file_attributes: - The file system attributes for the file. - :paramtype file_attributes:~azure.storage.fileshare.NTFSAttributes or str - :keyword file_creation_time: - Creation time for the file. - :paramtype file_creation_time:~datetime.datetime or str - :keyword file_last_write_time: - Last write time for the file. - :paramtype file_last_write_time:~datetime.datetime or str - :keyword Dict[str,str] metadata: - A name-value pair to associate with a file storage object. - :keyword source_lease: - Required if the source file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype source_lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword destination_lease: - Required if the destination file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str - :returns: The new File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if not new_name: - raise ValueError("Please specify a new file name.") - - new_name = new_name.strip('/') - new_path_and_query = new_name.split('?') - new_file_path = new_path_and_query[0] - if len(new_path_and_query) == 2: - new_file_sas = new_path_and_query[1] or self._query_str.strip('?') - else: - new_file_sas = self._query_str.strip('?') - - new_file_client = ShareFileClient( - '{}://{}'.format(self.scheme, self.primary_hostname), self.share_name, new_file_path, - credential=new_file_sas or self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function - ) - - kwargs.update(get_rename_smb_properties(kwargs)) - - timeout = kwargs.pop('timeout', None) - overwrite = kwargs.pop('overwrite', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - - source_access_conditions = get_source_access_conditions(kwargs.pop('source_lease', None)) - dest_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None)) - - try: - new_file_client._client.file.rename( # pylint: disable=protected-access - self.url, - timeout=timeout, - replace_if_exists=overwrite, - source_lease_access_conditions=source_access_conditions, - destination_lease_access_conditions=dest_access_conditions, - headers=headers, - **kwargs) - - return new_file_client - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. 
versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = '/'.join(self.file_path) - return file_props # type: ignore - - @distributed_trace - def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop('size', None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_file_metadata(self, metadata=None, **kwargs): - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.file.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - metadata=metadata, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def upload_range( # type: ignore - self, data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. 
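A companion sketch for the ``set_http_headers`` and ``set_file_metadata`` methods shown above, reusing the placeholder ``file_client`` from the previous example; ``ContentSettings`` is the helper whose ``cache_control``, ``content_type`` and related fields the deleted code reads::

    from azure.storage.fileshare import ContentSettings

    # SMB attributes and timestamps keep their "preserve" defaults here.
    file_client.set_http_headers(
        content_settings=ContentSettings(content_type="text/plain", cache_control="no-cache"),
    )

    # Each call replaces all existing user-defined metadata on the file.
    file_client.set_file_metadata({"category": "reports", "owner": "data-team"})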
Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @staticmethod - def _upload_range_from_url_options(source_url, # type: str - offset, # type: int - length, # type: int - source_offset, # type: int - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - - if offset is None: - raise ValueError("offset must be provided.") - if length is None: - raise ValueError("length must be provided.") - if source_offset is None: - raise ValueError("source_offset must be provided.") - - # Format range - end_range = offset + length - 1 - destination_range = 'bytes={0}-{1}'.format(offset, end_range) - source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) - source_authorization = kwargs.pop('source_authorization', None) - source_mod_conditions = get_source_conditions(kwargs) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - options = { - 'copy_source_authorization': source_authorization, - 'copy_source': source_url, - 'content_length': 0, - 'source_range': source_range, - 'range': destination_range, - 'source_modified_access_conditions': source_mod_conditions, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'cls': return_response_headers} - options.update(kwargs) - return options - - @distributed_trace - def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. 
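A short sketch of the ``upload_range`` method documented above, with the same placeholder client; note the 4 MB per-range limit called out in the docstring::

    data = b"\x00" * 1024

    # The file must already exist and be at least offset + length bytes long;
    # each range uploaded this way can be at most 4 MB.
    headers = file_client.upload_range(data, offset=0, length=len(data))
    print(headers)  # Etag / last-modified style response headers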
- Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword ~datetime.datetime source_if_modified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source - blob has been modified since the specified date/time. - :keyword ~datetime.datetime source_if_unmodified_since: - A DateTime value. Azure expects the date value passed in to be UTC. - If timezone is included, any non-UTC datetimes will be converted to UTC. - If a date is passed in without timezone info, it is assumed to be UTC. - Specify this conditional header to copy the blob only if the source blob - has not been modified since the specified date/time. - :keyword str source_etag: - The source ETag value, or the wildcard character (*). Used to check if the resource has changed, - and act according to the condition specified by the `match_condition` parameter. - :keyword ~azure.core.MatchConditions source_match_condition: - The source match condition to use upon the etag. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. - """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return self._client.file.upload_range_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - def _get_ranges_options( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - previous_sharesnapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - **kwargs - ): - # type: (...) 
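And a sketch of ``upload_range_from_url``, whose docstring continues below; the source URL is a placeholder in the same form as the docstring's examples::

    source_url = "https://otheraccount.file.core.windows.net/myshare/mydir/myfile?<sas-token>"

    # Copy 512 bytes from offset 0 of the source into offset 0 of this file.
    file_client.upload_range_from_url(
        source_url,
        offset=0,
        length=512,
        source_offset=0,
    )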
-> Dict[str, Any] - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - content_range = None - if offset is not None: - if length is not None: - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - else: - content_range = 'bytes={0}-'.format(offset) - options = { - 'sharesnapshot': self.snapshot, - 'lease_access_conditions': access_conditions, - 'timeout': kwargs.pop('timeout', None), - 'range': content_range} - if previous_sharesnapshot: - try: - options['prevsharesnapshot'] = previous_sharesnapshot.snapshot # type: ignore - except AttributeError: - try: - options['prevsharesnapshot'] = previous_sharesnapshot['snapshot'] # type: ignore - except TypeError: - options['prevsharesnapshot'] = previous_sharesnapshot - options.update(kwargs) - return options - - @distributed_trace - def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A list of valid ranges. - :rtype: List[dict[str, int]] - """ - options = self._get_ranges_options( - offset=offset, - length=length, - **kwargs) - try: - ranges = self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] - - def get_ranges_diff( # type: ignore - self, - previous_sharesnapshot, # type: Union[str, Dict[str, Any]] - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - .. versionadded:: 12.6.0 - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :param str previous_sharesnapshot: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous file snapshot to be compared - against a more recent snapshot or the current file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. - The first element are filled file ranges, the 2nd element is cleared file ranges. 
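The ``get_ranges`` implementation above returns plain dictionaries with ``start`` and ``end`` keys; a small sketch of reading them back with the same placeholder client::

    # List the valid ranges within the first 4 KB of the file.
    for file_range in file_client.get_ranges(offset=0, length=4096):
        print(file_range["start"], file_range["end"])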
- :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_ranges_options( - offset=offset, - length=length, - previous_sharesnapshot=previous_sharesnapshot, - **kwargs) - try: - ranges = self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_file_ranges_result(ranges) - - @distributed_trace - def clear_range( # type: ignore - self, offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - try: - return self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - optionalbody=None, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> ItemPaged[Handle] - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
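For the ``clear_range`` and ``resize_file`` methods above, a brief sketch; both the offset and the length passed to ``clear_range`` must be 512-byte aligned, as the deleted validation enforces::

    # Release the storage used by the first 512 bytes.
    file_client.clear_range(offset=0, length=512)

    # Resize the file to exactly 2 MB; SMB properties are preserved.
    file_client.resize_file(size=2 * 1024 * 1024)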
- :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return ItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace - def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/__init__.py deleted file mode 100644 index 34ce526..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
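A usage sketch for the handle-management methods above, again with the placeholder client; the returned counts use the keys built by the deleted code::

    # Enumerate open handles, then force-close everything that remains.
    for handle in file_client.list_handles():
        print(handle.id)

    result = file_client.close_all_handles()
    print(result["closed_handles_count"], result["failed_handles_count"])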
-# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] - -try: - from ._patch import patch_sdk # type: ignore - patch_sdk() -except ImportError: - pass diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_azure_file_storage.py deleted file mode 100644 index 6275ae4..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_azure_file_storage.py +++ /dev/null @@ -1,96 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core import PipelineClient -from msrest import Deserializer, Serializer - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - - from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from ._configuration import AzureFileStorageConfiguration -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from . import models - - -class AzureFileStorage(object): - """AzureFileStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.fileshare.operations.ServiceOperations - :ivar share: ShareOperations operations - :vartype share: azure.storage.fileshare.operations.ShareOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.fileshare.operations.DirectoryOperations - :ivar file: FileOperations operations - :vartype file: azure.storage.fileshare.operations.FileOperations - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - base_url = '{url}' - self._config = AzureFileStorageConfiguration(url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, http_request, **kwargs): - # type: (HttpRequest, Any) -> HttpResponse - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. 
- :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.pipeline.transport.HttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureFileStorage - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_configuration.py deleted file mode 100644 index d5000a6..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_configuration.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import TYPE_CHECKING - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any - -VERSION = "unknown" - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url, # type: str - **kwargs # type: Any - ): - # type: (...) -> None - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2021-04-10" - self.file_range_write_from_url = "update" - kwargs.setdefault('sdk_moniker', 'azurefilestorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs # type: Any - ): - # type: (...) 
-> None - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_patch.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_patch.py deleted file mode 100644 index 74e48ec..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/_patch.py +++ /dev/null @@ -1,31 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - -# This file is used for handwritten extensions to the generated code. Example: -# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -def patch_sdk(): - pass \ No newline at end of file diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/__init__.py deleted file mode 100644 index f306ba0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
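The ``_generated`` package deleted here is the AutoRest transport layer that ``ShareFileClient`` drives through ``self._client``. It is internal, but it can be constructed directly; a sketch using the module path removed by this diff (the retained API-version folders follow the same layout), with a placeholder SAS-authenticated URL::

    from azure.multiapi.storagev2.fileshare.v2021_04_10._generated import AzureFileStorage

    # Operation groups mirror the REST surface: service, share, directory, file.
    with AzureFileStorage(
        "https://myaccount.file.core.windows.net/myshare/mydir/myfile.txt?<sas-token>"
    ) as client:
        client.file.get_properties()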
-# -------------------------------------------------------------------------- - -from ._azure_file_storage import AzureFileStorage -__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/_azure_file_storage.py deleted file mode 100644 index 7453a46..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/_azure_file_storage.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core import AsyncPipelineClient -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest -from msrest import Deserializer, Serializer - -from ._configuration import AzureFileStorageConfiguration -from .operations import ServiceOperations -from .operations import ShareOperations -from .operations import DirectoryOperations -from .operations import FileOperations -from .. import models - - -class AzureFileStorage(object): - """AzureFileStorage. - - :ivar service: ServiceOperations operations - :vartype service: azure.storage.fileshare.aio.operations.ServiceOperations - :ivar share: ShareOperations operations - :vartype share: azure.storage.fileshare.aio.operations.ShareOperations - :ivar directory: DirectoryOperations operations - :vartype directory: azure.storage.fileshare.aio.operations.DirectoryOperations - :ivar file: FileOperations operations - :vartype file: azure.storage.fileshare.aio.operations.FileOperations - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. - :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - base_url = '{url}' - self._config = AzureFileStorageConfiguration(url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._serialize.client_side_validation = False - self._deserialize = Deserializer(client_models) - - self.service = ServiceOperations( - self._client, self._config, self._serialize, self._deserialize) - self.share = ShareOperations( - self._client, self._config, self._serialize, self._deserialize) - self.directory = DirectoryOperations( - self._client, self._config, self._serialize, self._deserialize) - self.file = FileOperations( - self._client, self._config, self._serialize, self._deserialize) - - async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: - """Runs the network request through the client's chained policies. - - :param http_request: The network request you want to make. Required. - :type http_request: ~azure.core.pipeline.transport.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to True. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse - """ - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - http_request.url = self._client.format_url(http_request.url, **path_format_arguments) - stream = kwargs.pop("stream", True) - pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) - return pipeline_response.http_response - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureFileStorage": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/_configuration.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/_configuration.py deleted file mode 100644 index 737d0fb..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/_configuration.py +++ /dev/null @@ -1,53 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - -class AzureFileStorageConfiguration(Configuration): - """Configuration for AzureFileStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, share, directory or file that is the target of the desired operation. 
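The asynchronous twin lives under ``_generated.aio`` and is used the same way, but awaited; a sketch under the same placeholder assumptions::

    import asyncio

    from azure.multiapi.storagev2.fileshare.v2021_04_10._generated.aio import AzureFileStorage

    async def main():
        async with AzureFileStorage(
            "https://myaccount.file.core.windows.net/myshare/mydir/myfile.txt?<sas-token>"
        ) as client:
            await client.file.get_properties()

    asyncio.run(main())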
- :type url: str - """ - - def __init__( - self, - url: str, - **kwargs: Any - ) -> None: - if url is None: - raise ValueError("Parameter 'url' must not be None.") - super(AzureFileStorageConfiguration, self).__init__(**kwargs) - - self.url = url - self.version = "2021-04-10" - self.file_range_write_from_url = "update" - kwargs.setdefault('sdk_moniker', 'azurefilestorage/{}'.format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, - **kwargs: Any - ) -> None: - self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/__init__.py deleted file mode 100644 index ba8fb22..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_directory_operations.py deleted file mode 100644 index 8e99a73..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_directory_operations.py +++ /dev/null @@ -1,900 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations: - """DirectoryOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - **kwargs: Any - ) -> None: - """Creates a new directory under the specified share or parent directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. 
- :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def get_properties( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Returns all system properties for the specified directory, and can also be used to check the - existence of a directory. The data returned does not include the files in the directory or any - subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Removes the specified empty directory. Note that the directory must be empty before it can be - deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def set_properties( - self, - timeout: Optional[int] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - **kwargs: Any - ) -> None: - """Sets properties on the directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. 
- :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - **kwargs: Any - ) -> None: - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - 
if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def list_files_and_directories_segment( - self, - prefix: Optional[str] = None, - sharesnapshot: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListFilesIncludeType"]]] = None, - include_extended_info: Optional[bool] = None, - **kwargs: Any - ) -> "_models.ListFilesAndDirectoriesSegmentResponse": - """Returns a list of files or directories under the specified share or directory. It lists the - contents only for a single level of the directory hierarchy. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType] - :param include_extended_info: Include extended information. 
- :type include_extended_info: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListFilesAndDirectoriesSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListFilesAndDirectoriesSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if include_extended_info is not None: - header_parameters['x-ms-file-extended-info'] = self._serialize.header("include_extended_info", include_extended_info, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def list_handles( - self, - marker: 
Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - recursive: Optional[bool] = None, - **kwargs: Any - ) -> "_models.ListHandlesResponse": - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. - :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def force_close_handles( - self, - handle_id: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - sharesnapshot: Optional[str] = None, - recursive: Optional[bool] = None, - **kwargs: Any - ) -> None: - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. 
- :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - async def rename( - self, - rename_source: str, - timeout: Optional[int] = None, - replace_if_exists: Optional[bool] = None, - ignore_read_only: Optional[bool] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - metadata: Optional[str] = None, - source_lease_access_conditions: Optional["_models.SourceLeaseAccessConditions"] = None, - destination_lease_access_conditions: Optional["_models.DestinationLeaseAccessConditions"] = None, - 
copy_file_smb_info: Optional["_models.CopyFileSmbInfo"] = None, - **kwargs: Any - ) -> None: - """Renames a directory. - - :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in - length. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param replace_if_exists: Optional. A boolean value for if the destination file already exists, - whether this request will overwrite the file or not. If true, the rename will succeed and will - overwrite the destination file. If not provided or if false and the destination file does - exist, the request will not overwrite the destination file. If provided and the destination - file doesn’t exist, the rename will succeed. Note: This value does not override the - x-ms-file-copy-ignore-read-only header value. - :type replace_if_exists: bool - :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly - attribute on a preexisting destination file should be respected. If true, the rename will - succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will - cause the rename to fail. - :type ignore_read_only: bool - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param source_lease_access_conditions: Parameter group. - :type source_lease_access_conditions: ~azure.storage.fileshare.models.SourceLeaseAccessConditions - :param destination_lease_access_conditions: Parameter group. - :type destination_lease_access_conditions: ~azure.storage.fileshare.models.DestinationLeaseAccessConditions - :param copy_file_smb_info: Parameter group. 
- :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_lease_id = None - _destination_lease_id = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - if copy_file_smb_info is not None: - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - if destination_lease_access_conditions is not None: - _destination_lease_id = destination_lease_access_conditions.destination_lease_id - if source_lease_access_conditions is not None: - _source_lease_id = source_lease_access_conditions.source_lease_id - restype = "directory" - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-file-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if replace_if_exists is not None: - header_parameters['x-ms-file-rename-replace-if-exists'] = self._serialize.header("replace_if_exists", replace_if_exists, 'bool') - if ignore_read_only is not None: - header_parameters['x-ms-file-rename-ignore-readonly'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') - if _source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", _source_lease_id, 'str') - if _destination_lease_id is not None: - header_parameters['x-ms-destination-lease-id'] = self._serialize.header("destination_lease_id", _destination_lease_id, 'str') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if metadata is not None: - 
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{shareName}/{directory}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_file_operations.py deleted file mode 100644 index 240596b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_file_operations.py +++ /dev/null @@ -1,1924 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... 
import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class FileOperations: - """FileOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - file_content_length: int, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - file_http_headers: Optional["_models.FileHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Creates a new file or replaces a file. Note it only initializes the file with no content. - - :param file_content_length: Specifies the maximum size for the file, up to 4 TB. - :type file_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - file_type_constant = "file" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("file_type_constant", file_type_constant, 'str') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - 
header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def download( - self, - timeout: Optional[int] = None, - range: Optional[str] = None, - range_get_content_md5: Optional[bool] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> IO: - """Reads or downloads a file from the system, including its metadata and properties. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and specified together with the - Range header, the service returns the MD5 hash for the range, as long as the range is less than - or equal to 4 MB in size. - :type range_get_content_md5: bool - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def get_properties( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Returns all user-defined metadata, standard HTTP properties, and system properties for the - file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-type']=self._deserialize('str', response.headers.get('x-ms-type')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def delete( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def set_http_headers( - self, - timeout: Optional[int] = None, - file_content_length: Optional[int] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - file_attributes: str = "none", - file_creation_time: str = "now", - file_last_write_time: str = "now", - file_http_headers: Optional["_models.FileHTTPHeaders"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Sets HTTP headers on the file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If the specified byte value - is less than the current size of the file, then all ranges above the specified byte value are - cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. 
This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - 
header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, 
response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) 
- response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - 
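As a rough sketch of the lease lifecycle these generated operations expose — assuming ``ops`` is an async ``FileOperations`` instance; the granted lease id is returned in the ``x-ms-lease-id`` response header::

    # Acquire an infinite lease with a caller-chosen id, swap the id, then break it.
    proposed = "11111111-1111-1111-1111-111111111111"
    await ops.acquire_lease(duration=-1, proposed_lease_id=proposed)
    await ops.change_lease(lease_id=proposed,
                           proposed_lease_id="22222222-2222-2222-2222-222222222222")
    await ops.break_lease()
    # Alternatively, release the lease explicitly with its current id:
    # await ops.release_lease(lease_id="22222222-2222-2222-2222-222222222222")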
- break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def upload_range( - self, - range: str, - content_length: int, - timeout: Optional[int] = None, - file_range_write: Union[str, "_models.FileRangeWriteType"] = "update", - content_md5: Optional[bytearray] = None, - optionalbody: Optional[IO] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the start and end of the range - must be specified. For an update operation, the range can be up to 4 MB in size. For a clear - operation, the range can be up to the value of the file's full size. The File service accepts - only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be - specified in the following format: bytes=startByte-endByte. - :type range: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_range_write: Specify one of the following options: - Update: Writes the bytes - specified by the request body into the specified range. The Range and Content-Length headers - must match to perform the update. - Clear: Clears the specified range and releases the space - used in storage for that range. To clear a range, set the Content-Length header to zero, and - set the Range header to a value that indicates the range to clear, up to maximum file size. - :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType - :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of - the data during transport. When the Content-MD5 header is specified, the File service compares - the hash of the content that has arrived with the header value that was sent. If the two hashes - do not match, the operation will fail with error code 400 (Bad Request). - :type content_md5: bytearray - :param optionalbody: Initial data. - :type optionalbody: IO - :param lease_access_conditions: Parameter group. 
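A hypothetical call to ``upload_range`` under the same assumption that ``ops`` is an async ``FileOperations`` instance and the target file is already at least 1 KiB long (``optionalbody`` may be bytes or a stream object)::

    data = b"\x00" * 1024
    # Write 1024 bytes into the first KiB of the file.
    await ops.upload_range(range="bytes=0-1023",
                           content_length=len(data),
                           optionalbody=data)
    # Clear the same range: x-ms-write=clear requires Content-Length 0.
    await ops.upload_range(range="bytes=0-1023",
                           content_length=0,
                           file_range_write="clear")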
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "range" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_range.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = optionalbody - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, 
None, response_headers) - - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def upload_range_from_url( - self, - range: str, - copy_source: str, - content_length: int, - timeout: Optional[int] = None, - source_range: Optional[str] = None, - source_content_crc64: Optional[bytearray] = None, - copy_source_authorization: Optional[str] = None, - source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Upload a range of bytes to a file where the contents are read from a URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_content_crc64: bytearray - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
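A sketch of ``upload_range_from_url`` under the same assumptions; because the data is read from the source URL rather than the request body, ``content_length`` is passed as 0 here::

    # Placeholder source URL with a SAS token; substitute a real one.
    sas_source = "https://account.file.core.windows.net/src-share/src.txt?<sas>"
    await ops.upload_range_from_url(range="bytes=0-1023",
                                    copy_source=sas_source,
                                    content_length=0,
                                    source_range="bytes=0-1023")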
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_match_crc64 = None - _source_if_none_match_crc64 = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - comp = "range" - accept = "application/xml" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - if _source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", _source_if_match_crc64, 'bytearray') - if _source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", _source_if_none_match_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def get_range_list( - self, - sharesnapshot: Optional[str] = None, - prevsharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - range: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> "_models.ShareFileRangeList": - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, - when present, specifies the previous snapshot. - :type prevsharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, inclusively. - :type range: str - :param lease_access_conditions: Parameter group. 
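And a sketch of ``get_range_list``; the attribute names on the returned ``ShareFileRangeList`` (``ranges`` and ``clear_ranges``) are assumed here::

    range_list = await ops.get_range_list(range="bytes=0-4194303")
    valid_ranges = range_list.ranges          # assumed attribute name
    cleared_ranges = range_list.clear_ranges  # assumed attribute name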
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareFileRangeList, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareFileRangeList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareFileRangeList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "rangelist" - accept = "application/xml" - - # Construct URL - url = self.get_range_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if prevsharesnapshot is not None: - query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-content-length']=self._deserialize('long', response.headers.get('x-ms-content-length')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareFileRangeList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def start_copy( - self, - copy_source: str, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - file_permission: 
Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - copy_file_smb_info: Optional["_models.CopyFileSmbInfo"] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param copy_file_smb_info: Parameter group. - :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_permission_copy_mode = None - _ignore_read_only = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - _set_archive_attribute = None - _lease_id = None - if copy_file_smb_info is not None: - _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - _ignore_read_only = copy_file_smb_info.ignore_read_only - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - _set_archive_attribute = copy_file_smb_info.set_archive_attribute - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.start_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if _file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", _file_permission_copy_mode, 'str') - if _ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-readonly'] = self._serialize.header("ignore_read_only", _ignore_read_only, 'bool') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if _set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", _set_archive_attribute, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def abort_copy( - self, - copy_id: str, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Aborts a pending Copy File operation, and leaves a destination file with zero length and full - metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
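A sketch of starting and then aborting a copy with these operations; the copy id is surfaced via the ``x-ms-copy-id`` response header, so a ``cls`` callback (called with ``(pipeline_response, deserialized, response_headers)``) is used to capture it::

    def _return_headers(pipeline_response, deserialized, response_headers):
        # Hand the raw response headers back to the caller.
        return response_headers

    headers = await ops.start_copy(
        copy_source="https://account.file.core.windows.net/src-share/src.txt?<sas>",
        cls=_return_headers)
    await ops.abort_copy(copy_id=headers["x-ms-copy-id"])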
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def list_handles( - self, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - **kwargs: Any - ) -> "_models.ListHandlesResponse": - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def force_close_handles( - self, - handle_id: str, - timeout: Optional[int] = None, - marker: Optional[str] = None, - sharesnapshot: Optional[str] = None, - **kwargs: Any - ) -> None: - 
"""Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - 
response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - async def rename( - self, - rename_source: str, - timeout: Optional[int] = None, - replace_if_exists: Optional[bool] = None, - ignore_read_only: Optional[bool] = None, - file_permission: Optional[str] = "inherit", - file_permission_key: Optional[str] = None, - metadata: Optional[str] = None, - source_lease_access_conditions: Optional["_models.SourceLeaseAccessConditions"] = None, - destination_lease_access_conditions: Optional["_models.DestinationLeaseAccessConditions"] = None, - copy_file_smb_info: Optional["_models.CopyFileSmbInfo"] = None, - **kwargs: Any - ) -> None: - """Renames a file. - - :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in - length. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param replace_if_exists: Optional. A boolean value for if the destination file already exists, - whether this request will overwrite the file or not. If true, the rename will succeed and will - overwrite the destination file. If not provided or if false and the destination file does - exist, the request will not overwrite the destination file. If provided and the destination - file doesn’t exist, the rename will succeed. Note: This value does not override the - x-ms-file-copy-ignore-read-only header value. - :type replace_if_exists: bool - :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly - attribute on a preexisting destination file should be respected. If true, the rename will - succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will - cause the rename to fail. - :type ignore_read_only: bool - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param source_lease_access_conditions: Parameter group. - :type source_lease_access_conditions: ~azure.storage.fileshare.models.SourceLeaseAccessConditions - :param destination_lease_access_conditions: Parameter group. - :type destination_lease_access_conditions: ~azure.storage.fileshare.models.DestinationLeaseAccessConditions - :param copy_file_smb_info: Parameter group. 
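# --- Illustrative usage sketch; not part of the generated module or this diff. ---
# The rename operation documented here is usually invoked through the public async
# ShareFileClient.rename_file helper rather than this generated layer directly; the
# helper name and the placeholder paths below are assumptions.
from azure.storage.fileshare.aio import ShareFileClient

async def rename_example(conn_str: str) -> None:
    src = ShareFileClient.from_connection_string(
        conn_str, share_name="myshare", file_path="dir/old.txt")
    async with src:
        # Returns a client pointing at the renamed file; overwrite and permission options
        # map to the x-ms-file-rename-* headers described in the docstring above.
        dst = await src.rename_file("dir/new.txt")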
- :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_lease_id = None - _destination_lease_id = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - if copy_file_smb_info is not None: - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - if destination_lease_access_conditions is not None: - _destination_lease_id = destination_lease_access_conditions.destination_lease_id - if source_lease_access_conditions is not None: - _source_lease_id = source_lease_access_conditions.source_lease_id - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-file-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if replace_if_exists is not None: - header_parameters['x-ms-file-rename-replace-if-exists'] = self._serialize.header("replace_if_exists", replace_if_exists, 'bool') - if ignore_read_only is not None: - header_parameters['x-ms-file-rename-ignore-readonly'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') - if _source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", _source_lease_id, 'str') - if _destination_lease_id is not None: - header_parameters['x-ms-destination-lease-id'] = self._serialize.header("destination_lease_id", _destination_lease_id, 'str') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['Accept'] 
= self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_service_operations.py deleted file mode 100644 index f413f29..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_service_operations.py +++ /dev/null @@ -1,269 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... 
import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations: - """ServiceOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def set_properties( - self, - storage_service_properties: "_models.StorageServiceProperties", - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Sets properties for a storage account's File service endpoint, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if 
response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - async def get_properties( - self, - timeout: Optional[int] = None, - **kwargs: Any - ) -> "_models.StorageServiceProperties": - """Gets the properties of a storage account's File service, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - async def list_shares_segment( - self, - prefix: 
Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[Union[str, "_models.ListSharesIncludeType"]]] = None, - timeout: Optional[int] = None, - **kwargs: Any - ) -> "_models.ListSharesResponse": - """The List Shares Segment operation returns a list of the shares and share snapshots under the - specified account. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListSharesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListSharesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_shares_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListSharesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_shares_segment.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_share_operations.py deleted file mode 100644 index de90e4e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/aio/operations/_share_operations.py +++ /dev/null @@ -1,1486 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest - -from ... import models as _models - -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - -class ShareOperations: - """ShareOperations async operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer) -> None: - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - async def create( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - quota: Optional[int] = None, - access_tier: Optional[Union[str, "_models.ShareAccessTier"]] = None, - enabled_protocols: Optional[str] = None, - root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, - **kwargs: Any - ) -> None: - """Creates a new share under the specified account. If the share with the same name already - exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. 
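# --- Illustrative usage sketch; not part of the generated module or this diff. ---
# Share creation is typically driven through the public async ShareClient wrapper; the
# keyword names and placeholder values below are assumptions based on that surface.
from azure.storage.fileshare.aio import ShareClient

async def create_share_example(conn_str: str) -> None:
    share = ShareClient.from_connection_string(conn_str, share_name="myshare")
    async with share:
        # metadata and quota map to the x-ms-meta and x-ms-share-quota headers above.
        await share.create_share(metadata={"env": "demo"}, quota=5)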
- :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param enabled_protocols: Protocols to enable on the share. - :type enabled_protocols: str - :param root_squash: Root squash to set on the share. Only valid for NFS shares. - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if enabled_protocols is not None: - header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_properties( - 
self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Returns all user-defined metadata and system properties for the specified share or share - snapshot. The data returned does not include the share's list of files. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
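# --- Illustrative usage sketch; not part of the generated module or this diff. ---
# The share properties deserialized above surface through ShareClient.get_share_properties();
# the attribute names on the returned model are assumptions from the public surface.
from azure.storage.fileshare.aio import ShareClient

async def inspect_and_delete(share: ShareClient) -> None:
    props = await share.get_share_properties()
    print(props.quota, props.access_tier, props.metadata)
    # Snapshots can be removed together with the base share (x-ms-delete-snapshots).
    await share.delete_share(delete_snapshots=True)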
response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota')) - response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')) - response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')) - response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')) - response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')) - response_headers['x-ms-share-provisioned-bandwidth-mibps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-bandwidth-mibps')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')) - response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols')) - response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}'} # type: ignore - - async def delete( - self, - sharesnapshot: Optional[str] = None, - timeout: Optional[int] = None, - delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Operation marks the specified share or share snapshot for deletion. The share or share snapshot - and any files contained within it are later deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the base share and all of its - snapshots. - :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}'} # type: ignore - - async def acquire_lease( - self, - timeout: Optional[int] = None, - duration: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. 
A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', 
response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def release_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def change_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - proposed_lease_id: Optional[str] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
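# --- Illustrative usage sketch; not part of the generated module or this diff. ---
# The acquire/renew/change/release/break lease operations in this class back the public
# ShareClient.acquire_lease helper, which returns a ShareLeaseClient; the method and
# keyword names below are assumptions from that public async surface.
from azure.storage.fileshare.aio import ShareClient

async def lease_example(share: ShareClient) -> None:
    lease = await share.acquire_lease(lease_duration=-1)  # -1 = lease that never expires
    try:
        await lease.renew()
    finally:
        # Destructive share operations can also pass the lease, e.g. delete_share(lease=lease).
        await lease.release()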
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def renew_lease( - self, - lease_id: str, - timeout: Optional[int] = None, - sharesnapshot: Optional[str] = None, - request_id_parameter: Optional[str] = None, - **kwargs: 
Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "renew" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def break_lease( - self, - timeout: Optional[int] = None, - break_period: Optional[int] = None, - request_id_parameter: Optional[str] = None, - sharesnapshot: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return 
cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{shareName}'} # type: ignore - - async def create_snapshot( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - **kwargs: Any - ) -> None: - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{shareName}'} # type: ignore - - async def create_permission( - self, - share_permission: "_models.SharePermission", - timeout: Optional[int] = None, - **kwargs: Any - ) -> None: - """Create a permission (a security descriptor). - - :param share_permission: A permission (a security descriptor) at the share level. 
- :type share_permission: ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/xml" - - # Construct URL - url = self.create_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(share_permission, 'SharePermission') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_permission.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_permission( - self, - file_permission_key: str, - timeout: Optional[int] = None, - **kwargs: Any - ) -> "_models.SharePermission": - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. 
- :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SharePermission, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - accept = "application/json" - - # Construct URL - url = self.get_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('SharePermission', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_properties( - self, - timeout: Optional[int] = None, - quota: Optional[int] = None, - access_tier: Optional[Union[str, "_models.ShareAccessTier"]] = None, - root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Sets properties for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
- :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_metadata( - self, - timeout: Optional[int] = None, - metadata: Optional[str] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Sets one or more user-defined 
name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_access_policy( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> List["_models.SignedIdentifier"]: - """Returns information about stored access policies specified on the 
share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - async def set_access_policy( - self, - timeout: Optional[int] = None, - share_acl: Optional[List["_models.SignedIdentifier"]] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> None: - """Sets a stored access policy for use with shared access signatures. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param share_acl: The ACL for the share. - :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - 
return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - async def get_statistics( - self, - timeout: Optional[int] = None, - lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, - **kwargs: Any - ) -> "_models.ShareStats": - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareStats, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} # type: ignore - - async def 
restore( - self, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - deleted_share_name: Optional[str] = None, - deleted_share_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_share_name: Specifies the name of the previously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the previously-deleted share. - :type deleted_share_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{shareName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/__init__.py deleted file mode 100644 index 24faebc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/__init__.py +++ /dev/null @@ -1,133 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import AccessPolicy - from ._models_py3 import ClearRange - from ._models_py3 import CopyFileSmbInfo - from ._models_py3 import CorsRule - from ._models_py3 import DestinationLeaseAccessConditions - from ._models_py3 import DirectoryItem - from ._models_py3 import FileHTTPHeaders - from ._models_py3 import FileItem - from ._models_py3 import FileProperty - from ._models_py3 import FileRange - from ._models_py3 import FilesAndDirectoriesListSegment - from ._models_py3 import HandleItem - from ._models_py3 import LeaseAccessConditions - from ._models_py3 import ListFilesAndDirectoriesSegmentResponse - from ._models_py3 import ListHandlesResponse - from ._models_py3 import ListSharesResponse - from ._models_py3 import Metrics - from ._models_py3 import RetentionPolicy - from ._models_py3 import ShareFileRangeList - from ._models_py3 import ShareItemInternal - from ._models_py3 import SharePermission - from ._models_py3 import SharePropertiesInternal - from ._models_py3 import ShareProtocolSettings - from ._models_py3 import ShareSmbSettings - from ._models_py3 import ShareStats - from ._models_py3 import SignedIdentifier - from ._models_py3 import SmbMultichannel - from ._models_py3 import SourceLeaseAccessConditions - from ._models_py3 import SourceModifiedAccessConditions - from ._models_py3 import StorageError - from ._models_py3 import StorageServiceProperties -except (SyntaxError, ImportError): - from ._models import AccessPolicy # type: ignore - from ._models import ClearRange # type: ignore - from ._models import CopyFileSmbInfo # type: ignore - from ._models import CorsRule # type: ignore - from ._models import DestinationLeaseAccessConditions # type: ignore - from ._models import DirectoryItem # type: ignore - from ._models import FileHTTPHeaders # type: ignore - from ._models import FileItem # type: ignore - from ._models import FileProperty # type: ignore - from ._models import FileRange # type: ignore - from ._models import FilesAndDirectoriesListSegment # type: ignore - from ._models import HandleItem # type: ignore - from ._models import LeaseAccessConditions # type: ignore - from ._models import ListFilesAndDirectoriesSegmentResponse # type: ignore - from ._models import ListHandlesResponse # type: ignore - from ._models import 
ListSharesResponse # type: ignore - from ._models import Metrics # type: ignore - from ._models import RetentionPolicy # type: ignore - from ._models import ShareFileRangeList # type: ignore - from ._models import ShareItemInternal # type: ignore - from ._models import SharePermission # type: ignore - from ._models import SharePropertiesInternal # type: ignore - from ._models import ShareProtocolSettings # type: ignore - from ._models import ShareSmbSettings # type: ignore - from ._models import ShareStats # type: ignore - from ._models import SignedIdentifier # type: ignore - from ._models import SmbMultichannel # type: ignore - from ._models import SourceLeaseAccessConditions # type: ignore - from ._models import SourceModifiedAccessConditions # type: ignore - from ._models import StorageError # type: ignore - from ._models import StorageServiceProperties # type: ignore - -from ._azure_file_storage_enums import ( - CopyStatusType, - DeleteSnapshotsOptionType, - FileRangeWriteType, - LeaseDurationType, - LeaseStateType, - LeaseStatusType, - ListFilesIncludeType, - ListSharesIncludeType, - PermissionCopyModeType, - ShareAccessTier, - ShareRootSquash, - StorageErrorCode, -) - -__all__ = [ - 'AccessPolicy', - 'ClearRange', - 'CopyFileSmbInfo', - 'CorsRule', - 'DestinationLeaseAccessConditions', - 'DirectoryItem', - 'FileHTTPHeaders', - 'FileItem', - 'FileProperty', - 'FileRange', - 'FilesAndDirectoriesListSegment', - 'HandleItem', - 'LeaseAccessConditions', - 'ListFilesAndDirectoriesSegmentResponse', - 'ListHandlesResponse', - 'ListSharesResponse', - 'Metrics', - 'RetentionPolicy', - 'ShareFileRangeList', - 'ShareItemInternal', - 'SharePermission', - 'SharePropertiesInternal', - 'ShareProtocolSettings', - 'ShareSmbSettings', - 'ShareStats', - 'SignedIdentifier', - 'SmbMultichannel', - 'SourceLeaseAccessConditions', - 'SourceModifiedAccessConditions', - 'StorageError', - 'StorageServiceProperties', - 'CopyStatusType', - 'DeleteSnapshotsOptionType', - 'FileRangeWriteType', - 'LeaseDurationType', - 'LeaseStateType', - 'LeaseStatusType', - 'ListFilesIncludeType', - 'ListSharesIncludeType', - 'PermissionCopyModeType', - 'ShareAccessTier', - 'ShareRootSquash', - 'StorageErrorCode', -] diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_azure_file_storage_enums.py deleted file mode 100644 index 1c8b351..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_azure_file_storage_enums.py +++ /dev/null @@ -1,169 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - PENDING = "pending" - SUCCESS = "success" - ABORTED = "aborted" - FAILED = "failed" - -class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - INCLUDE = "include" - INCLUDE_LEASED = "include-leased" - -class FileRangeWriteType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - UPDATE = "update" - CLEAR = "clear" - -class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """When a share is leased, specifies whether the lease is of infinite or fixed duration. - """ - - INFINITE = "infinite" - FIXED = "fixed" - -class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Lease state of the share. - """ - - AVAILABLE = "available" - LEASED = "leased" - EXPIRED = "expired" - BREAKING = "breaking" - BROKEN = "broken" - -class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The current lease status of the share. - """ - - LOCKED = "locked" - UNLOCKED = "unlocked" - -class ListFilesIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - TIMESTAMPS = "Timestamps" - ETAG = "Etag" - ATTRIBUTES = "Attributes" - PERMISSION_KEY = "PermissionKey" - -class ListSharesIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SNAPSHOTS = "snapshots" - METADATA = "metadata" - DELETED = "deleted" - -class PermissionCopyModeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - SOURCE = "source" - OVERRIDE = "override" - -class ShareAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - TRANSACTION_OPTIMIZED = "TransactionOptimized" - HOT = "Hot" - COOL = "Cool" - -class ShareRootSquash(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - - NO_ROOT_SQUASH = "NoRootSquash" - ROOT_SQUASH = "RootSquash" - ALL_SQUASH = "AllSquash" - -class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Error codes returned by the service - """ - - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = 
"InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = "OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" - CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" - DELETE_PENDING = "DeletePending" - DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" - FILE_LOCK_CONFLICT = "FileLockConflict" - INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" - PARENT_NOT_FOUND = "ParentNotFound" - READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" - SHARE_ALREADY_EXISTS = "ShareAlreadyExists" - SHARE_BEING_DELETED = "ShareBeingDeleted" - SHARE_DISABLED = "ShareDisabled" - SHARE_NOT_FOUND = "ShareNotFound" - SHARING_VIOLATION = "SharingViolation" - SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" - SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" - SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" - SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" - CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" - AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" - AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" - AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" - AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" - AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_models.py deleted file mode 100644 index 46433c1..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_models.py +++ /dev/null @@ -1,1162 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: The date-time the policy is active. 
- :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = kwargs.get('start', None) - self.expiry = kwargs.get('expiry', None) - self.permission = kwargs.get('permission', None) - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class CopyFileSmbInfo(msrest.serialization.Model): - """Parameter group. - - :param file_attributes: Specifies either the option to copy file attributes from a source - file(source) to a target file or a list of attributes to set on a target file. - :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file creation time from a source - file(source) to a target file or a time value in ISO 8601 format to set as creation time on a - target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last write time from a - source file(source) to a target file or a time value in ISO 8601 format to set as last write - time on a target file. - :type file_last_write_time: str - :param file_permission_copy_mode: Specifies the option to copy file security descriptor from - source file or to set it using the value which is defined by the header value of - x-ms-file-permission or x-ms-file-permission-key. Possible values include: "source", - "override". - :type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file if it already exists - and has read-only attribute set. - :type ignore_read_only: bool - :param set_archive_attribute: Specifies the option to set archive attribute on a target file. - True means archive attribute will be set on a target file despite attribute overrides or a - source file state. 
- :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_attributes': {'key': 'fileAttributes', 'type': 'str'}, - 'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'}, - 'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'}, - 'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'}, - 'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'}, - 'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_attributes = kwargs.get('file_attributes', None) - self.file_creation_time = kwargs.get('file_creation_time', None) - self.file_last_write_time = kwargs.get('file_last_write_time', None) - self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None) - self.ignore_read_only = kwargs.get('ignore_read_only', None) - self.set_archive_attribute = kwargs.get('set_archive_attribute', None) - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user age sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). - :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a browser should cache the - preflight OPTIONS request. 
- :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = kwargs['allowed_origins'] - self.allowed_methods = kwargs['allowed_methods'] - self.allowed_headers = kwargs['allowed_headers'] - self.exposed_headers = kwargs['exposed_headers'] - self.max_age_in_seconds = kwargs['max_age_in_seconds'] - - -class DestinationLeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param destination_lease_id: Required if the destination file has an active infinite lease. The - lease ID specified for this header must match the lease ID of the destination file. If the - request does not include the lease ID or it is not valid, the operation fails with status code - 412 (Precondition Failed). If this header is specified and the destination file does not - currently have an active lease, the operation will also fail with status code 412 (Precondition - Failed). - :type destination_lease_id: str - """ - - _attribute_map = { - 'destination_lease_id': {'key': 'destinationLeaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DestinationLeaseAccessConditions, self).__init__(**kwargs) - self.destination_lease_id = kwargs.get('destination_lease_id', None) - - -class DirectoryItem(msrest.serialization.Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param file_id: - :type file_id: str - :param properties: File properties. - :type properties: ~azure.storage.fileshare.models.FileProperty - :param attributes: - :type attributes: str - :param permission_key: - :type permission_key: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - 'attributes': {'key': 'Attributes', 'type': 'str'}, - 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__( - self, - **kwargs - ): - super(DirectoryItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.file_id = kwargs.get('file_id', None) - self.properties = kwargs.get('properties', None) - self.attributes = kwargs.get('attributes', None) - self.permission_key = kwargs.get('permission_key', None) - - -class FileHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param file_content_type: Sets the MIME content type of the file. The default type is - 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this resource. 
- :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service stores this value - but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition header. - :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': 'fileContentType', 'type': 'str'}, - 'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'}, - 'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'}, - 'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'}, - 'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'}, - 'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = kwargs.get('file_content_type', None) - self.file_content_encoding = kwargs.get('file_content_encoding', None) - self.file_content_language = kwargs.get('file_content_language', None) - self.file_cache_control = kwargs.get('file_cache_control', None) - self.file_content_md5 = kwargs.get('file_content_md5', None) - self.file_content_disposition = kwargs.get('file_content_disposition', None) - - -class FileItem(msrest.serialization.Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param file_id: - :type file_id: str - :param properties: Required. File properties. - :type properties: ~azure.storage.fileshare.models.FileProperty - :param attributes: - :type attributes: str - :param permission_key: - :type permission_key: str - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - 'attributes': {'key': 'Attributes', 'type': 'str'}, - 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, - } - _xml_map = { - 'name': 'File' - } - - def __init__( - self, - **kwargs - ): - super(FileItem, self).__init__(**kwargs) - self.name = kwargs['name'] - self.file_id = kwargs.get('file_id', None) - self.properties = kwargs['properties'] - self.attributes = kwargs.get('attributes', None) - self.permission_key = kwargs.get('permission_key', None) - - -class FileProperty(msrest.serialization.Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value may not be up-to-date - since an SMB client may have modified the file locally. The value of Content-Length may not - reflect that fact until the handle is closed or the op-lock is broken. To retrieve current - property values, call Get File Properties. 
- :type content_length: long - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_access_time: - :type last_access_time: ~datetime.datetime - :param last_write_time: - :type last_write_time: ~datetime.datetime - :param change_time: - :type change_time: ~datetime.datetime - :param last_modified: - :type last_modified: ~datetime.datetime - :param etag: - :type etag: str - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'creation_time': {'key': 'CreationTime', 'type': 'iso-8601'}, - 'last_access_time': {'key': 'LastAccessTime', 'type': 'iso-8601'}, - 'last_write_time': {'key': 'LastWriteTime', 'type': 'iso-8601'}, - 'change_time': {'key': 'ChangeTime', 'type': 'iso-8601'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(FileProperty, self).__init__(**kwargs) - self.content_length = kwargs['content_length'] - self.creation_time = kwargs.get('creation_time', None) - self.last_access_time = kwargs.get('last_access_time', None) - self.last_write_time = kwargs.get('last_write_time', None) - self.change_time = kwargs.get('change_time', None) - self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) - - -class FileRange(msrest.serialization.Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long'}, - 'end': {'key': 'End', 'type': 'long'}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__( - self, - **kwargs - ): - super(FileRange, self).__init__(**kwargs) - self.start = kwargs['start'] - self.end = kwargs['end'] - - -class FilesAndDirectoriesListSegment(msrest.serialization.Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. - :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]'}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__( - self, - **kwargs - ): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = kwargs['directory_items'] - self.file_items = kwargs['file_items'] - - -class HandleItem(msrest.serialization.Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID. - :type handle_id: str - :param path: Required. File or directory name including full path starting from share root. - :type path: str - :param file_id: Required. FileId uniquely identifies the file or directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the object. 
- :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file handle was opened. - :type session_id: str - :param client_ip: Required. Client IP that opened the handle. - :type client_ip: str - :param open_time: Required. Time when the session that previously opened the handle has last - been reconnected. (UTC). - :type open_time: ~datetime.datetime - :param last_reconnect_time: Time handle was last connected to (UTC). - :type last_reconnect_time: ~datetime.datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str'}, - 'path': {'key': 'Path', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'parent_id': {'key': 'ParentId', 'type': 'str'}, - 'session_id': {'key': 'SessionId', 'type': 'str'}, - 'client_ip': {'key': 'ClientIp', 'type': 'str'}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__( - self, - **kwargs - ): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = kwargs['handle_id'] - self.path = kwargs['path'] - self.file_id = kwargs['file_id'] - self.parent_id = kwargs.get('parent_id', None) - self.session_id = kwargs['session_id'] - self.client_ip = kwargs['client_ip'] - self.open_time = kwargs['open_time'] - self.last_reconnect_time = kwargs.get('last_reconnect_time', None) - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = kwargs.get('lease_id', None) - - -class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. Abstract for entries that can be listed from Directory. - :type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. 
- :type next_marker: str - :param directory_id: - :type directory_id: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - 'directory_id': {'key': 'DirectoryId', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.share_name = kwargs['share_name'] - self.share_snapshot = kwargs.get('share_snapshot', None) - self.directory_path = kwargs['directory_path'] - self.prefix = kwargs['prefix'] - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.segment = kwargs['segment'] - self.next_marker = kwargs['next_marker'] - self.directory_id = kwargs.get('directory_id', None) - - -class ListHandlesResponse(msrest.serialization.Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = kwargs.get('handle_list', None) - self.next_marker = kwargs['next_marker'] - - -class ListSharesResponse(msrest.serialization.Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItemInternal] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - **kwargs - ): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = kwargs['service_endpoint'] - self.prefix = kwargs.get('prefix', None) - self.marker = kwargs.get('marker', None) - self.max_results = kwargs.get('max_results', None) - self.share_items = kwargs.get('share_items', None) - self.next_marker = kwargs['next_marker'] - - -class Metrics(msrest.serialization.Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: The retention policy. - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = kwargs['version'] - self.enabled = kwargs['enabled'] - self.include_apis = kwargs.get('include_apis', None) - self.retention_policy = kwargs.get('retention_policy', None) - - -class RetentionPolicy(msrest.serialization.Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the File service. - If false, metrics data is retained, and the user is responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be retained. All data older - than this value will be deleted. Metrics data is deleted on a best-effort basis after the - retention period expires. - :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs['enabled'] - self.days = kwargs.get('days', None) - - -class ShareFileRangeList(msrest.serialization.Model): - """The list of file ranges. 
- - :param ranges: - :type ranges: list[~azure.storage.fileshare.models.FileRange] - :param clear_ranges: - :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] - """ - - _attribute_map = { - 'ranges': {'key': 'Ranges', 'type': '[FileRange]'}, - 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'}, - } - - def __init__( - self, - **kwargs - ): - super(ShareFileRangeList, self).__init__(**kwargs) - self.ranges = kwargs.get('ranges', None) - self.clear_ranges = kwargs.get('clear_ranges', None) - - -class ShareItemInternal(msrest.serialization.Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a share. - :type properties: ~azure.storage.fileshare.models.SharePropertiesInternal - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__( - self, - **kwargs - ): - super(ShareItemInternal, self).__init__(**kwargs) - self.name = kwargs['name'] - self.snapshot = kwargs.get('snapshot', None) - self.deleted = kwargs.get('deleted', None) - self.version = kwargs.get('version', None) - self.properties = kwargs['properties'] - self.metadata = kwargs.get('metadata', None) - - -class SharePermission(msrest.serialization.Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor Definition Language - (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SharePermission, self).__init__(**kwargs) - self.permission = kwargs['permission'] - - -class SharePropertiesInternal(msrest.serialization.Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param quota: Required. 
- :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_m_bps: - :type provisioned_ingress_m_bps: int - :param provisioned_egress_m_bps: - :type provisioned_egress_m_bps: int - :param provisioned_bandwidth_mi_bps: - :type provisioned_bandwidth_mi_bps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: ~datetime.datetime - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: - :type access_tier: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param access_tier_transition_state: - :type access_tier_transition_state: str - :param lease_status: The current lease status of the share. Possible values include: "locked", - "unlocked". - :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType - :param lease_state: Lease state of the share. Possible values include: "available", "leased", - "expired", "breaking", "broken". - :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType - :param lease_duration: When a share is leased, specifies whether the lease is of infinite or - fixed duration. Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType - :param enabled_protocols: - :type enabled_protocols: str - :param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash". - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'quota': {'key': 'Quota', 'type': 'int'}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'}, - 'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'}, - 'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'}, - 'provisioned_bandwidth_mi_bps': {'key': 'ProvisionedBandwidthMiBps', 'type': 'int'}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'}, - 'root_squash': {'key': 'RootSquash', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SharePropertiesInternal, self).__init__(**kwargs) - self.last_modified = kwargs['last_modified'] - self.etag = kwargs['etag'] - self.quota = kwargs['quota'] - self.provisioned_iops = kwargs.get('provisioned_iops', None) - self.provisioned_ingress_m_bps = kwargs.get('provisioned_ingress_m_bps', None) - self.provisioned_egress_m_bps = kwargs.get('provisioned_egress_m_bps', None) - self.provisioned_bandwidth_mi_bps = kwargs.get('provisioned_bandwidth_mi_bps', None) - 
self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None) - self.deleted_time = kwargs.get('deleted_time', None) - self.remaining_retention_days = kwargs.get('remaining_retention_days', None) - self.access_tier = kwargs.get('access_tier', None) - self.access_tier_change_time = kwargs.get('access_tier_change_time', None) - self.access_tier_transition_state = kwargs.get('access_tier_transition_state', None) - self.lease_status = kwargs.get('lease_status', None) - self.lease_state = kwargs.get('lease_state', None) - self.lease_duration = kwargs.get('lease_duration', None) - self.enabled_protocols = kwargs.get('enabled_protocols', None) - self.root_squash = kwargs.get('root_squash', None) - - -class ShareProtocolSettings(msrest.serialization.Model): - """Protocol settings. - - :param smb: Settings for SMB protocol. - :type smb: ~azure.storage.fileshare.models.ShareSmbSettings - """ - - _attribute_map = { - 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings'}, - } - _xml_map = { - 'name': 'ProtocolSettings' - } - - def __init__( - self, - **kwargs - ): - super(ShareProtocolSettings, self).__init__(**kwargs) - self.smb = kwargs.get('smb', None) - - -class ShareSmbSettings(msrest.serialization.Model): - """Settings for SMB protocol. - - :param multichannel: Settings for SMB Multichannel. - :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel - """ - - _attribute_map = { - 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'}, - } - _xml_map = { - 'name': 'SMB' - } - - def __init__( - self, - **kwargs - ): - super(ShareSmbSettings, self).__init__(**kwargs) - self.multichannel = kwargs.get('multichannel', None) - - -class ShareStats(msrest.serialization.Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that - this value may not include all recently created or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = kwargs['share_usage_bytes'] - - -class SignedIdentifier(msrest.serialization.Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. - :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - - def __init__( - self, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs['id'] - self.access_policy = kwargs.get('access_policy', None) - - -class SmbMultichannel(msrest.serialization.Model): - """Settings for SMB multichannel. - - :param enabled: If SMB multichannel is enabled. 
- :type enabled: bool - """ - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Multichannel' - } - - def __init__( - self, - **kwargs - ): - super(SmbMultichannel, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) - - -class SourceLeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_lease_id: Required if the source file has an active infinite lease. - :type source_lease_id: str - """ - - _attribute_map = { - 'source_lease_id': {'key': 'sourceLeaseId', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceLeaseAccessConditions, self).__init__(**kwargs) - self.source_lease_id = kwargs.get('source_lease_id', None) - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching - crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a - matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'}, - 'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'}, - } - - def __init__( - self, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None) - self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None) - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = kwargs.get('message', None) - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for - files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in minute aggregates for - files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.fileshare.models.CorsRule] - :param protocol: Protocol settings. 
- :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings'}, - } - - def __init__( - self, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = kwargs.get('hour_metrics', None) - self.minute_metrics = kwargs.get('minute_metrics', None) - self.cors = kwargs.get('cors', None) - self.protocol = kwargs.get('protocol', None) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_models_py3.py deleted file mode 100644 index 1620eda..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/models/_models_py3.py +++ /dev/null @@ -1,1316 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Dict, List, Optional, Union - -from azure.core.exceptions import HttpResponseError -import msrest.serialization - -from ._azure_file_storage_enums import * - - -class AccessPolicy(msrest.serialization.Model): - """An Access policy. - - :param start: The date-time the policy is active. - :type start: str - :param expiry: The date-time the policy expires. - :type expiry: str - :param permission: The permissions for the ACL policy. - :type permission: str - """ - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str'}, - 'expiry': {'key': 'Expiry', 'type': 'str'}, - 'permission': {'key': 'Permission', 'type': 'str'}, - } - - def __init__( - self, - *, - start: Optional[str] = None, - expiry: Optional[str] = None, - permission: Optional[str] = None, - **kwargs - ): - super(AccessPolicy, self).__init__(**kwargs) - self.start = start - self.expiry = expiry - self.permission = permission - - -class ClearRange(msrest.serialization.Model): - """ClearRange. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. - :type start: long - :param end: Required. - :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, - 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, - } - _xml_map = { - 'name': 'ClearRange' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(ClearRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class CopyFileSmbInfo(msrest.serialization.Model): - """Parameter group. - - :param file_attributes: Specifies either the option to copy file attributes from a source - file(source) to a target file or a list of attributes to set on a target file. 
- :type file_attributes: str - :param file_creation_time: Specifies either the option to copy file creation time from a source - file(source) to a target file or a time value in ISO 8601 format to set as creation time on a - target file. - :type file_creation_time: str - :param file_last_write_time: Specifies either the option to copy file last write time from a - source file(source) to a target file or a time value in ISO 8601 format to set as last write - time on a target file. - :type file_last_write_time: str - :param file_permission_copy_mode: Specifies the option to copy file security descriptor from - source file or to set it using the value which is defined by the header value of - x-ms-file-permission or x-ms-file-permission-key. Possible values include: "source", - "override". - :type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType - :param ignore_read_only: Specifies the option to overwrite the target file if it already exists - and has read-only attribute set. - :type ignore_read_only: bool - :param set_archive_attribute: Specifies the option to set archive attribute on a target file. - True means archive attribute will be set on a target file despite attribute overrides or a - source file state. - :type set_archive_attribute: bool - """ - - _attribute_map = { - 'file_attributes': {'key': 'fileAttributes', 'type': 'str'}, - 'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'}, - 'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'}, - 'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'}, - 'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'}, - 'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'}, - } - - def __init__( - self, - *, - file_attributes: Optional[str] = None, - file_creation_time: Optional[str] = None, - file_last_write_time: Optional[str] = None, - file_permission_copy_mode: Optional[Union[str, "PermissionCopyModeType"]] = None, - ignore_read_only: Optional[bool] = None, - set_archive_attribute: Optional[bool] = None, - **kwargs - ): - super(CopyFileSmbInfo, self).__init__(**kwargs) - self.file_attributes = file_attributes - self.file_creation_time = file_creation_time - self.file_last_write_time = file_last_write_time - self.file_permission_copy_mode = file_permission_copy_mode - self.ignore_read_only = ignore_read_only - self.set_archive_attribute = set_archive_attribute - - -class CorsRule(msrest.serialization.Model): - """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param allowed_origins: Required. The origin domains that are permitted to make a request - against the storage service via CORS. The origin domain is the domain from which the request - originates. Note that the origin must be an exact case-sensitive match with the origin that the - user age sends to the service. You can also use the wildcard character '*' to allow all origin - domains to make requests via CORS. - :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may - use for a CORS request. (comma separated). 
- :type allowed_methods: str - :param allowed_headers: Required. The request headers that the origin domain may specify on the - CORS request. - :type allowed_headers: str - :param exposed_headers: Required. The response headers that may be sent in the response to the - CORS request and exposed by the browser to the request issuer. - :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a browser should cache the - preflight OPTIONS request. - :type max_age_in_seconds: int - """ - - _validation = { - 'allowed_origins': {'required': True}, - 'allowed_methods': {'required': True}, - 'allowed_headers': {'required': True}, - 'exposed_headers': {'required': True}, - 'max_age_in_seconds': {'required': True, 'minimum': 0}, - } - - _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, - } - - def __init__( - self, - *, - allowed_origins: str, - allowed_methods: str, - allowed_headers: str, - exposed_headers: str, - max_age_in_seconds: int, - **kwargs - ): - super(CorsRule, self).__init__(**kwargs) - self.allowed_origins = allowed_origins - self.allowed_methods = allowed_methods - self.allowed_headers = allowed_headers - self.exposed_headers = exposed_headers - self.max_age_in_seconds = max_age_in_seconds - - -class DestinationLeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param destination_lease_id: Required if the destination file has an active infinite lease. The - lease ID specified for this header must match the lease ID of the destination file. If the - request does not include the lease ID or it is not valid, the operation fails with status code - 412 (Precondition Failed). If this header is specified and the destination file does not - currently have an active lease, the operation will also fail with status code 412 (Precondition - Failed). - :type destination_lease_id: str - """ - - _attribute_map = { - 'destination_lease_id': {'key': 'destinationLeaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - destination_lease_id: Optional[str] = None, - **kwargs - ): - super(DestinationLeaseAccessConditions, self).__init__(**kwargs) - self.destination_lease_id = destination_lease_id - - -class DirectoryItem(msrest.serialization.Model): - """A listed directory item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param file_id: - :type file_id: str - :param properties: File properties. 
- :type properties: ~azure.storage.fileshare.models.FileProperty - :param attributes: - :type attributes: str - :param permission_key: - :type permission_key: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - 'attributes': {'key': 'Attributes', 'type': 'str'}, - 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, - } - _xml_map = { - 'name': 'Directory' - } - - def __init__( - self, - *, - name: str, - file_id: Optional[str] = None, - properties: Optional["FileProperty"] = None, - attributes: Optional[str] = None, - permission_key: Optional[str] = None, - **kwargs - ): - super(DirectoryItem, self).__init__(**kwargs) - self.name = name - self.file_id = file_id - self.properties = properties - self.attributes = attributes - self.permission_key = permission_key - - -class FileHTTPHeaders(msrest.serialization.Model): - """Parameter group. - - :param file_content_type: Sets the MIME content type of the file. The default type is - 'application/octet-stream'. - :type file_content_type: str - :param file_content_encoding: Specifies which content encodings have been applied to the file. - :type file_content_encoding: str - :param file_content_language: Specifies the natural languages used by this resource. - :type file_content_language: str - :param file_cache_control: Sets the file's cache control. The File service stores this value - but does not use or modify it. - :type file_cache_control: str - :param file_content_md5: Sets the file's MD5 hash. - :type file_content_md5: bytearray - :param file_content_disposition: Sets the file's Content-Disposition header. - :type file_content_disposition: str - """ - - _attribute_map = { - 'file_content_type': {'key': 'fileContentType', 'type': 'str'}, - 'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'}, - 'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'}, - 'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'}, - 'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'}, - 'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'}, - } - - def __init__( - self, - *, - file_content_type: Optional[str] = None, - file_content_encoding: Optional[str] = None, - file_content_language: Optional[str] = None, - file_cache_control: Optional[str] = None, - file_content_md5: Optional[bytearray] = None, - file_content_disposition: Optional[str] = None, - **kwargs - ): - super(FileHTTPHeaders, self).__init__(**kwargs) - self.file_content_type = file_content_type - self.file_content_encoding = file_content_encoding - self.file_content_language = file_content_language - self.file_cache_control = file_cache_control - self.file_content_md5 = file_content_md5 - self.file_content_disposition = file_content_disposition - - -class FileItem(msrest.serialization.Model): - """A listed file item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param file_id: - :type file_id: str - :param properties: Required. File properties. 
- :type properties: ~azure.storage.fileshare.models.FileProperty - :param attributes: - :type attributes: str - :param permission_key: - :type permission_key: str - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'FileProperty'}, - 'attributes': {'key': 'Attributes', 'type': 'str'}, - 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, - } - _xml_map = { - 'name': 'File' - } - - def __init__( - self, - *, - name: str, - properties: "FileProperty", - file_id: Optional[str] = None, - attributes: Optional[str] = None, - permission_key: Optional[str] = None, - **kwargs - ): - super(FileItem, self).__init__(**kwargs) - self.name = name - self.file_id = file_id - self.properties = properties - self.attributes = attributes - self.permission_key = permission_key - - -class FileProperty(msrest.serialization.Model): - """File properties. - - All required parameters must be populated in order to send to Azure. - - :param content_length: Required. Content length of the file. This value may not be up-to-date - since an SMB client may have modified the file locally. The value of Content-Length may not - reflect that fact until the handle is closed or the op-lock is broken. To retrieve current - property values, call Get File Properties. - :type content_length: long - :param creation_time: - :type creation_time: ~datetime.datetime - :param last_access_time: - :type last_access_time: ~datetime.datetime - :param last_write_time: - :type last_write_time: ~datetime.datetime - :param change_time: - :type change_time: ~datetime.datetime - :param last_modified: - :type last_modified: ~datetime.datetime - :param etag: - :type etag: str - """ - - _validation = { - 'content_length': {'required': True}, - } - - _attribute_map = { - 'content_length': {'key': 'Content-Length', 'type': 'long'}, - 'creation_time': {'key': 'CreationTime', 'type': 'iso-8601'}, - 'last_access_time': {'key': 'LastAccessTime', 'type': 'iso-8601'}, - 'last_write_time': {'key': 'LastWriteTime', 'type': 'iso-8601'}, - 'change_time': {'key': 'ChangeTime', 'type': 'iso-8601'}, - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - } - - def __init__( - self, - *, - content_length: int, - creation_time: Optional[datetime.datetime] = None, - last_access_time: Optional[datetime.datetime] = None, - last_write_time: Optional[datetime.datetime] = None, - change_time: Optional[datetime.datetime] = None, - last_modified: Optional[datetime.datetime] = None, - etag: Optional[str] = None, - **kwargs - ): - super(FileProperty, self).__init__(**kwargs) - self.content_length = content_length - self.creation_time = creation_time - self.last_access_time = last_access_time - self.last_write_time = last_write_time - self.change_time = change_time - self.last_modified = last_modified - self.etag = etag - - -class FileRange(msrest.serialization.Model): - """An Azure Storage file range. - - All required parameters must be populated in order to send to Azure. - - :param start: Required. Start of the range. - :type start: long - :param end: Required. End of the range. 
- :type end: long - """ - - _validation = { - 'start': {'required': True}, - 'end': {'required': True}, - } - - _attribute_map = { - 'start': {'key': 'Start', 'type': 'long'}, - 'end': {'key': 'End', 'type': 'long'}, - } - _xml_map = { - 'name': 'Range' - } - - def __init__( - self, - *, - start: int, - end: int, - **kwargs - ): - super(FileRange, self).__init__(**kwargs) - self.start = start - self.end = end - - -class FilesAndDirectoriesListSegment(msrest.serialization.Model): - """Abstract for entries that can be listed from Directory. - - All required parameters must be populated in order to send to Azure. - - :param directory_items: Required. - :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] - :param file_items: Required. - :type file_items: list[~azure.storage.fileshare.models.FileItem] - """ - - _validation = { - 'directory_items': {'required': True}, - 'file_items': {'required': True}, - } - - _attribute_map = { - 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'}, - 'file_items': {'key': 'FileItems', 'type': '[FileItem]'}, - } - _xml_map = { - 'name': 'Entries' - } - - def __init__( - self, - *, - directory_items: List["DirectoryItem"], - file_items: List["FileItem"], - **kwargs - ): - super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) - self.directory_items = directory_items - self.file_items = file_items - - -class HandleItem(msrest.serialization.Model): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :param handle_id: Required. XSMB service handle ID. - :type handle_id: str - :param path: Required. File or directory name including full path starting from share root. - :type path: str - :param file_id: Required. FileId uniquely identifies the file or directory. - :type file_id: str - :param parent_id: ParentId uniquely identifies the parent directory of the object. - :type parent_id: str - :param session_id: Required. SMB session ID in context of which the file handle was opened. - :type session_id: str - :param client_ip: Required. Client IP that opened the handle. - :type client_ip: str - :param open_time: Required. Time when the session that previously opened the handle has last - been reconnected. (UTC). - :type open_time: ~datetime.datetime - :param last_reconnect_time: Time handle was last connected to (UTC). 
- :type last_reconnect_time: ~datetime.datetime - """ - - _validation = { - 'handle_id': {'required': True}, - 'path': {'required': True}, - 'file_id': {'required': True}, - 'session_id': {'required': True}, - 'client_ip': {'required': True}, - 'open_time': {'required': True}, - } - - _attribute_map = { - 'handle_id': {'key': 'HandleId', 'type': 'str'}, - 'path': {'key': 'Path', 'type': 'str'}, - 'file_id': {'key': 'FileId', 'type': 'str'}, - 'parent_id': {'key': 'ParentId', 'type': 'str'}, - 'session_id': {'key': 'SessionId', 'type': 'str'}, - 'client_ip': {'key': 'ClientIp', 'type': 'str'}, - 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'}, - 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'}, - } - _xml_map = { - 'name': 'Handle' - } - - def __init__( - self, - *, - handle_id: str, - path: str, - file_id: str, - session_id: str, - client_ip: str, - open_time: datetime.datetime, - parent_id: Optional[str] = None, - last_reconnect_time: Optional[datetime.datetime] = None, - **kwargs - ): - super(HandleItem, self).__init__(**kwargs) - self.handle_id = handle_id - self.path = path - self.file_id = file_id - self.parent_id = parent_id - self.session_id = session_id - self.client_ip = client_ip - self.open_time = open_time - self.last_reconnect_time = last_reconnect_time - - -class LeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param lease_id: If specified, the operation only succeeds if the resource's lease is active - and matches this ID. - :type lease_id: str - """ - - _attribute_map = { - 'lease_id': {'key': 'leaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - lease_id: Optional[str] = None, - **kwargs - ): - super(LeaseAccessConditions, self).__init__(**kwargs) - self.lease_id = lease_id - - -class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model): - """An enumeration of directories and files. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param share_name: Required. - :type share_name: str - :param share_snapshot: - :type share_snapshot: str - :param directory_path: Required. - :type directory_path: str - :param prefix: Required. - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param segment: Required. Abstract for entries that can be listed from Directory. - :type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment - :param next_marker: Required. 
- :type next_marker: str - :param directory_id: - :type directory_id: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'share_name': {'required': True}, - 'directory_path': {'required': True}, - 'prefix': {'required': True}, - 'segment': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}}, - 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}}, - 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - 'directory_id': {'key': 'DirectoryId', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - share_name: str, - directory_path: str, - prefix: str, - segment: "FilesAndDirectoriesListSegment", - next_marker: str, - share_snapshot: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - directory_id: Optional[str] = None, - **kwargs - ): - super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.share_name = share_name - self.share_snapshot = share_snapshot - self.directory_path = directory_path - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.segment = segment - self.next_marker = next_marker - self.directory_id = directory_id - - -class ListHandlesResponse(msrest.serialization.Model): - """An enumeration of handles. - - All required parameters must be populated in order to send to Azure. - - :param handle_list: - :type handle_list: list[~azure.storage.fileshare.models.HandleItem] - :param next_marker: Required. - :type next_marker: str - """ - - _validation = { - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - next_marker: str, - handle_list: Optional[List["HandleItem"]] = None, - **kwargs - ): - super(ListHandlesResponse, self).__init__(**kwargs) - self.handle_list = handle_list - self.next_marker = next_marker - - -class ListSharesResponse(msrest.serialization.Model): - """An enumeration of shares. - - All required parameters must be populated in order to send to Azure. - - :param service_endpoint: Required. - :type service_endpoint: str - :param prefix: - :type prefix: str - :param marker: - :type marker: str - :param max_results: - :type max_results: int - :param share_items: - :type share_items: list[~azure.storage.fileshare.models.ShareItemInternal] - :param next_marker: Required. 
- :type next_marker: str - """ - - _validation = { - 'service_endpoint': {'required': True}, - 'next_marker': {'required': True}, - } - - _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str'}, - 'marker': {'key': 'Marker', 'type': 'str'}, - 'max_results': {'key': 'MaxResults', 'type': 'int'}, - 'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str'}, - } - _xml_map = { - 'name': 'EnumerationResults' - } - - def __init__( - self, - *, - service_endpoint: str, - next_marker: str, - prefix: Optional[str] = None, - marker: Optional[str] = None, - max_results: Optional[int] = None, - share_items: Optional[List["ShareItemInternal"]] = None, - **kwargs - ): - super(ListSharesResponse, self).__init__(**kwargs) - self.service_endpoint = service_endpoint - self.prefix = prefix - self.marker = marker - self.max_results = max_results - self.share_items = share_items - self.next_marker = next_marker - - -class Metrics(msrest.serialization.Model): - """Storage Analytics metrics for file service. - - All required parameters must be populated in order to send to Azure. - - :param version: Required. The version of Storage Analytics to configure. - :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the File service. - :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary statistics for called - API operations. - :type include_apis: bool - :param retention_policy: The retention policy. - :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy - """ - - _validation = { - 'version': {'required': True}, - 'enabled': {'required': True}, - } - - _attribute_map = { - 'version': {'key': 'Version', 'type': 'str'}, - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, - } - - def __init__( - self, - *, - version: str, - enabled: bool, - include_apis: Optional[bool] = None, - retention_policy: Optional["RetentionPolicy"] = None, - **kwargs - ): - super(Metrics, self).__init__(**kwargs) - self.version = version - self.enabled = enabled - self.include_apis = include_apis - self.retention_policy = retention_policy - - -class RetentionPolicy(msrest.serialization.Model): - """The retention policy. - - All required parameters must be populated in order to send to Azure. - - :param enabled: Required. Indicates whether a retention policy is enabled for the File service. - If false, metrics data is retained, and the user is responsible for deleting it. - :type enabled: bool - :param days: Indicates the number of days that metrics data should be retained. All data older - than this value will be deleted. Metrics data is deleted on a best-effort basis after the - retention period expires. 
- :type days: int - """ - - _validation = { - 'enabled': {'required': True}, - 'days': {'maximum': 365, 'minimum': 1}, - } - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - 'days': {'key': 'Days', 'type': 'int'}, - } - - def __init__( - self, - *, - enabled: bool, - days: Optional[int] = None, - **kwargs - ): - super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class ShareFileRangeList(msrest.serialization.Model): - """The list of file ranges. - - :param ranges: - :type ranges: list[~azure.storage.fileshare.models.FileRange] - :param clear_ranges: - :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] - """ - - _attribute_map = { - 'ranges': {'key': 'Ranges', 'type': '[FileRange]'}, - 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'}, - } - - def __init__( - self, - *, - ranges: Optional[List["FileRange"]] = None, - clear_ranges: Optional[List["ClearRange"]] = None, - **kwargs - ): - super(ShareFileRangeList, self).__init__(**kwargs) - self.ranges = ranges - self.clear_ranges = clear_ranges - - -class ShareItemInternal(msrest.serialization.Model): - """A listed Azure Storage share item. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. - :type name: str - :param snapshot: - :type snapshot: str - :param deleted: - :type deleted: bool - :param version: - :type version: str - :param properties: Required. Properties of a share. - :type properties: ~azure.storage.fileshare.models.SharePropertiesInternal - :param metadata: Dictionary of :code:``. - :type metadata: dict[str, str] - """ - - _validation = { - 'name': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'Name', 'type': 'str'}, - 'snapshot': {'key': 'Snapshot', 'type': 'str'}, - 'deleted': {'key': 'Deleted', 'type': 'bool'}, - 'version': {'key': 'Version', 'type': 'str'}, - 'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'}, - 'metadata': {'key': 'Metadata', 'type': '{str}'}, - } - _xml_map = { - 'name': 'Share' - } - - def __init__( - self, - *, - name: str, - properties: "SharePropertiesInternal", - snapshot: Optional[str] = None, - deleted: Optional[bool] = None, - version: Optional[str] = None, - metadata: Optional[Dict[str, str]] = None, - **kwargs - ): - super(ShareItemInternal, self).__init__(**kwargs) - self.name = name - self.snapshot = snapshot - self.deleted = deleted - self.version = version - self.properties = properties - self.metadata = metadata - - -class SharePermission(msrest.serialization.Model): - """A permission (a security descriptor) at the share level. - - All required parameters must be populated in order to send to Azure. - - :param permission: Required. The permission in the Security Descriptor Definition Language - (SDDL). - :type permission: str - """ - - _validation = { - 'permission': {'required': True}, - } - - _attribute_map = { - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__( - self, - *, - permission: str, - **kwargs - ): - super(SharePermission, self).__init__(**kwargs) - self.permission = permission - - -class SharePropertiesInternal(msrest.serialization.Model): - """Properties of a share. - - All required parameters must be populated in order to send to Azure. - - :param last_modified: Required. - :type last_modified: ~datetime.datetime - :param etag: Required. - :type etag: str - :param quota: Required. 
- :type quota: int - :param provisioned_iops: - :type provisioned_iops: int - :param provisioned_ingress_m_bps: - :type provisioned_ingress_m_bps: int - :param provisioned_egress_m_bps: - :type provisioned_egress_m_bps: int - :param provisioned_bandwidth_mi_bps: - :type provisioned_bandwidth_mi_bps: int - :param next_allowed_quota_downgrade_time: - :type next_allowed_quota_downgrade_time: ~datetime.datetime - :param deleted_time: - :type deleted_time: ~datetime.datetime - :param remaining_retention_days: - :type remaining_retention_days: int - :param access_tier: - :type access_tier: str - :param access_tier_change_time: - :type access_tier_change_time: ~datetime.datetime - :param access_tier_transition_state: - :type access_tier_transition_state: str - :param lease_status: The current lease status of the share. Possible values include: "locked", - "unlocked". - :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType - :param lease_state: Lease state of the share. Possible values include: "available", "leased", - "expired", "breaking", "broken". - :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType - :param lease_duration: When a share is leased, specifies whether the lease is of infinite or - fixed duration. Possible values include: "infinite", "fixed". - :type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType - :param enabled_protocols: - :type enabled_protocols: str - :param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash". - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - """ - - _validation = { - 'last_modified': {'required': True}, - 'etag': {'required': True}, - 'quota': {'required': True}, - } - - _attribute_map = { - 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, - 'etag': {'key': 'Etag', 'type': 'str'}, - 'quota': {'key': 'Quota', 'type': 'int'}, - 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'}, - 'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'}, - 'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'}, - 'provisioned_bandwidth_mi_bps': {'key': 'ProvisionedBandwidthMiBps', 'type': 'int'}, - 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'}, - 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, - 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, - 'access_tier': {'key': 'AccessTier', 'type': 'str'}, - 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, - 'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'}, - 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, - 'lease_state': {'key': 'LeaseState', 'type': 'str'}, - 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, - 'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'}, - 'root_squash': {'key': 'RootSquash', 'type': 'str'}, - } - - def __init__( - self, - *, - last_modified: datetime.datetime, - etag: str, - quota: int, - provisioned_iops: Optional[int] = None, - provisioned_ingress_m_bps: Optional[int] = None, - provisioned_egress_m_bps: Optional[int] = None, - provisioned_bandwidth_mi_bps: Optional[int] = None, - next_allowed_quota_downgrade_time: Optional[datetime.datetime] = None, - deleted_time: Optional[datetime.datetime] = None, - remaining_retention_days: Optional[int] = None, - access_tier: Optional[str] = None, - access_tier_change_time: 
Optional[datetime.datetime] = None, - access_tier_transition_state: Optional[str] = None, - lease_status: Optional[Union[str, "LeaseStatusType"]] = None, - lease_state: Optional[Union[str, "LeaseStateType"]] = None, - lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, - enabled_protocols: Optional[str] = None, - root_squash: Optional[Union[str, "ShareRootSquash"]] = None, - **kwargs - ): - super(SharePropertiesInternal, self).__init__(**kwargs) - self.last_modified = last_modified - self.etag = etag - self.quota = quota - self.provisioned_iops = provisioned_iops - self.provisioned_ingress_m_bps = provisioned_ingress_m_bps - self.provisioned_egress_m_bps = provisioned_egress_m_bps - self.provisioned_bandwidth_mi_bps = provisioned_bandwidth_mi_bps - self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time - self.deleted_time = deleted_time - self.remaining_retention_days = remaining_retention_days - self.access_tier = access_tier - self.access_tier_change_time = access_tier_change_time - self.access_tier_transition_state = access_tier_transition_state - self.lease_status = lease_status - self.lease_state = lease_state - self.lease_duration = lease_duration - self.enabled_protocols = enabled_protocols - self.root_squash = root_squash - - -class ShareProtocolSettings(msrest.serialization.Model): - """Protocol settings. - - :param smb: Settings for SMB protocol. - :type smb: ~azure.storage.fileshare.models.ShareSmbSettings - """ - - _attribute_map = { - 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings'}, - } - _xml_map = { - 'name': 'ProtocolSettings' - } - - def __init__( - self, - *, - smb: Optional["ShareSmbSettings"] = None, - **kwargs - ): - super(ShareProtocolSettings, self).__init__(**kwargs) - self.smb = smb - - -class ShareSmbSettings(msrest.serialization.Model): - """Settings for SMB protocol. - - :param multichannel: Settings for SMB Multichannel. - :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel - """ - - _attribute_map = { - 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'}, - } - _xml_map = { - 'name': 'SMB' - } - - def __init__( - self, - *, - multichannel: Optional["SmbMultichannel"] = None, - **kwargs - ): - super(ShareSmbSettings, self).__init__(**kwargs) - self.multichannel = multichannel - - -class ShareStats(msrest.serialization.Model): - """Stats for the share. - - All required parameters must be populated in order to send to Azure. - - :param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that - this value may not include all recently created or recently resized files. - :type share_usage_bytes: int - """ - - _validation = { - 'share_usage_bytes': {'required': True}, - } - - _attribute_map = { - 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'}, - } - - def __init__( - self, - *, - share_usage_bytes: int, - **kwargs - ): - super(ShareStats, self).__init__(**kwargs) - self.share_usage_bytes = share_usage_bytes - - -class SignedIdentifier(msrest.serialization.Model): - """Signed identifier. - - All required parameters must be populated in order to send to Azure. - - :param id: Required. A unique id. - :type id: str - :param access_policy: The access policy. 
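SharePropertiesInternal is what each Share element of a list response deserializes into; the public client re-exposes these fields on its share properties objects. A hedged sketch, again assuming the public azure.storage.fileshare surface and a placeholder connection string::

    from azure.storage.fileshare import ShareServiceClient

    service = ShareServiceClient.from_connection_string("<connection-string>")  # placeholder
    for share in service.list_shares(include_metadata=True):
        # quota, access_tier and the lease fields come from the XML mapped above
        print(share.name, share.quota, share.access_tier, share.metadata)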
- :type access_policy: ~azure.storage.fileshare.models.AccessPolicy - """ - - _validation = { - 'id': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'Id', 'type': 'str'}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, - } - - def __init__( - self, - *, - id: str, - access_policy: Optional["AccessPolicy"] = None, - **kwargs - ): - super(SignedIdentifier, self).__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class SmbMultichannel(msrest.serialization.Model): - """Settings for SMB multichannel. - - :param enabled: If SMB multichannel is enabled. - :type enabled: bool - """ - - _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool'}, - } - _xml_map = { - 'name': 'Multichannel' - } - - def __init__( - self, - *, - enabled: Optional[bool] = None, - **kwargs - ): - super(SmbMultichannel, self).__init__(**kwargs) - self.enabled = enabled - - -class SourceLeaseAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_lease_id: Required if the source file has an active infinite lease. - :type source_lease_id: str - """ - - _attribute_map = { - 'source_lease_id': {'key': 'sourceLeaseId', 'type': 'str'}, - } - - def __init__( - self, - *, - source_lease_id: Optional[str] = None, - **kwargs - ): - super(SourceLeaseAccessConditions, self).__init__(**kwargs) - self.source_lease_id = source_lease_id - - -class SourceModifiedAccessConditions(msrest.serialization.Model): - """Parameter group. - - :param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching - crc64 checksum. - :type source_if_match_crc64: bytearray - :param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a - matching crc64 checksum. - :type source_if_none_match_crc64: bytearray - """ - - _attribute_map = { - 'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'}, - 'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'}, - } - - def __init__( - self, - *, - source_if_match_crc64: Optional[bytearray] = None, - source_if_none_match_crc64: Optional[bytearray] = None, - **kwargs - ): - super(SourceModifiedAccessConditions, self).__init__(**kwargs) - self.source_if_match_crc64 = source_if_match_crc64 - self.source_if_none_match_crc64 = source_if_none_match_crc64 - - -class StorageError(msrest.serialization.Model): - """StorageError. - - :param message: - :type message: str - """ - - _attribute_map = { - 'message': {'key': 'Message', 'type': 'str'}, - } - - def __init__( - self, - *, - message: Optional[str] = None, - **kwargs - ): - super(StorageError, self).__init__(**kwargs) - self.message = message - - -class StorageServiceProperties(msrest.serialization.Model): - """Storage service properties. - - :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for - files. - :type hour_metrics: ~azure.storage.fileshare.models.Metrics - :param minute_metrics: A summary of request statistics grouped by API in minute aggregates for - files. - :type minute_metrics: ~azure.storage.fileshare.models.Metrics - :param cors: The set of CORS rules. - :type cors: list[~azure.storage.fileshare.models.CorsRule] - :param protocol: Protocol settings. 
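SignedIdentifier pairs an id with an AccessPolicy and is what a share's stored access policy serializes to. A small sketch of setting one through the public client (share name, identifier and connection string are placeholders)::

    from datetime import datetime, timedelta, timezone

    from azure.storage.fileshare import AccessPolicy, ShareClient, ShareSasPermissions

    share = ShareClient.from_connection_string("<connection-string>", share_name="myshare")
    policy = AccessPolicy(
        permission=ShareSasPermissions(read=True, list=True),
        start=datetime.now(timezone.utc),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1),
    )
    # Each dictionary key becomes the <Id> of one SignedIdentifier element.
    share.set_share_access_policy(signed_identifiers={"read-only-1h": policy})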
- :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings - """ - - _attribute_map = { - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, - 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings'}, - } - - def __init__( - self, - *, - hour_metrics: Optional["Metrics"] = None, - minute_metrics: Optional["Metrics"] = None, - cors: Optional[List["CorsRule"]] = None, - protocol: Optional["ShareProtocolSettings"] = None, - **kwargs - ): - super(StorageServiceProperties, self).__init__(**kwargs) - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - self.protocol = protocol diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/__init__.py deleted file mode 100644 index ba8fb22..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations - -__all__ = [ - 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', -] diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_directory_operations.py deleted file mode 100644 index d6f42d0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_directory_operations.py +++ /dev/null @@ -1,913 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class DirectoryOperations(object): - """DirectoryOperations operations. - - You should not instantiate this class directly. 
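ShareProtocolSettings, ShareSmbSettings and SmbMultichannel nest into the ProtocolSettings element of the service properties. A sketch of enabling SMB Multichannel with the public client (model and keyword names assumed from azure.storage.fileshare; the connection string is a placeholder)::

    from azure.storage.fileshare import (
        ShareProtocolSettings,
        ShareServiceClient,
        ShareSmbSettings,
        SmbMultichannel,
    )

    service = ShareServiceClient.from_connection_string("<connection-string>")  # placeholder
    # Nests as <ProtocolSettings><SMB><Multichannel><Enabled>true</Enabled> per the XML maps above.
    protocol = ShareProtocolSettings(smb=ShareSmbSettings(multichannel=SmbMultichannel(enabled=True)))
    service.set_service_properties(protocol=protocol)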
Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a new directory under the specified share or parent directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now.
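Stripped of the serializer plumbing, the request this create operation assembles (in the body that follows) is a PUT against /{shareName}/{directory} with restype=directory and a handful of x-ms-file-* headers. A sketch of the resulting wire shape, with example values and the SharedKey/SAS signing omitted::

    account, share, directory = "myaccount", "myshare", "newdir"  # example values
    url = f"https://{account}.file.core.windows.net/{share}/{directory}"

    # Query string and headers as assembled by the generated create() body below.
    query_parameters = {"restype": "directory", "timeout": "30"}
    header_parameters = {
        "x-ms-version": "2021-04-10",
        "x-ms-file-permission": "inherit",
        "x-ms-file-attributes": "none",
        "x-ms-file-creation-time": "now",
        "x-ms-file-last-write-time": "now",
        "Accept": "application/xml",
    }
    print("PUT", url, query_parameters, header_parameters)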
- :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all system properties for the specified directory, and can also be used to check the - existence of a directory. The data returned does not include the files in the directory or any - subdirectories. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Removes the specified empty directory. Note that the directory must be empty before it can be - deleted. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def set_properties( - self, - timeout=None, # type: Optional[int] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties on the directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now.
- :type file_last_write_time: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates user defined metadata for the specified directory. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', 
response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def list_files_and_directories_segment( - self, - prefix=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListFilesIncludeType"]]] - include_extended_info=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListFilesAndDirectoriesSegmentResponse" - """Returns a list of files or directories under the specified share or directory. It lists the - contents only for a single level of the directory hierarchy. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType] - :param include_extended_info: Include extended information. 
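list_files_and_directories_segment is the paged primitive behind directory listings; the public ShareDirectoryClient wraps the prefix/marker/maxresults protocol described here in an iterator. A hedged sketch (client names from azure.storage.fileshare; share, path and connection string are placeholders)::

    from azure.storage.fileshare import ShareDirectoryClient

    directory = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="reports"
    )
    # Continuation markers and maxresults paging are handled by the returned iterator.
    for item in directory.list_directories_and_files(name_starts_with="2021-"):
        print(item["name"])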
- :type include_extended_info: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListFilesAndDirectoriesSegmentResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListFilesAndDirectoriesSegmentResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "directory" - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_files_and_directories_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if include_extended_info is not None: - header_parameters['x-ms-file-extended-info'] = self._serialize.header("include_extended_info", include_extended_info, 'bool') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def list_handles( - self, - marker=None, # type: 
Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - recursive=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListHandlesResponse" - """Lists handles for directory. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files. - :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def force_close_handles( - self, - handle_id, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - recursive=None, # type: Optional[bool] - **kwargs # type: Any - ): - # type: (...) -> None - """Closes all handles open for given directory. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param recursive: Specifies operation should apply to the directory specified in the URI, its - files, its subdirectories and their files.
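list_handles and force_close_handles surface on the public ShareDirectoryClient as list_handles and close_all_handles; the latter closes every open handle (handle_id '*') and follows the x-ms-marker continuation header internally. A hedged sketch with placeholder names::

    from azure.storage.fileshare import ShareDirectoryClient

    directory = ShareDirectoryClient.from_connection_string(
        "<connection-string>", share_name="myshare", directory_path="reports"
    )
    for handle in directory.list_handles(recursive=True):
        print(handle.id, handle.path, handle.client_ip)
    # Closes every handle under the directory, paging via x-ms-marker until done.
    result = directory.close_all_handles(recursive=True)
    print(result)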
- :type recursive: bool - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - if recursive is not None: - header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) - response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore - - def rename( - self, - rename_source, # type: str - timeout=None, # type: Optional[int] - replace_if_exists=None, # type: Optional[bool] - ignore_read_only=None, # type: Optional[bool] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - metadata=None, # type: Optional[str] - source_lease_access_conditions=None, # type: Optional["_models.SourceLeaseAccessConditions"] - destination_lease_access_conditions=None, # type: 
Optional["_models.DestinationLeaseAccessConditions"] - copy_file_smb_info=None, # type: Optional["_models.CopyFileSmbInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """Renames a directory. - - :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in - length. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param replace_if_exists: Optional. A boolean value for if the destination file already exists, - whether this request will overwrite the file or not. If true, the rename will succeed and will - overwrite the destination file. If not provided or if false and the destination file does - exist, the request will not overwrite the destination file. If provided and the destination - file doesn’t exist, the rename will succeed. Note: This value does not override the - x-ms-file-copy-ignore-read-only header value. - :type replace_if_exists: bool - :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly - attribute on a preexisting destination file should be respected. If true, the rename will - succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will - cause the rename to fail. - :type ignore_read_only: bool - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param source_lease_access_conditions: Parameter group. - :type source_lease_access_conditions: ~azure.storage.fileshare.models.SourceLeaseAccessConditions - :param destination_lease_access_conditions: Parameter group. - :type destination_lease_access_conditions: ~azure.storage.fileshare.models.DestinationLeaseAccessConditions - :param copy_file_smb_info: Parameter group. 
- :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_lease_id = None - _destination_lease_id = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - if copy_file_smb_info is not None: - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - if destination_lease_access_conditions is not None: - _destination_lease_id = destination_lease_access_conditions.destination_lease_id - if source_lease_access_conditions is not None: - _source_lease_id = source_lease_access_conditions.source_lease_id - restype = "directory" - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-file-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if replace_if_exists is not None: - header_parameters['x-ms-file-rename-replace-if-exists'] = self._serialize.header("replace_if_exists", replace_if_exists, 'bool') - if ignore_read_only is not None: - header_parameters['x-ms-file-rename-ignore-readonly'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') - if _source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", _source_lease_id, 'str') - if _destination_lease_id is not None: - header_parameters['x-ms-destination-lease-id'] = self._serialize.header("destination_lease_id", _destination_lease_id, 'str') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if metadata is not None: - 
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{shareName}/{directory}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_file_operations.py deleted file mode 100644 index beec6ae..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_file_operations.py +++ /dev/null @@ -1,1946 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. 
import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class FileOperations(object): - """FileOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - file_content_length, # type: int - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - file_http_headers=None, # type: Optional["_models.FileHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a new file or replaces a file. Note it only initializes the file with no content. - - :param file_content_length: Specifies the maximum size for the file, up to 4 TB. - :type file_content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - file_type_constant = "file" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - header_parameters['x-ms-type'] = self._serialize.header("file_type_constant", file_type_constant, 'str') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - 
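# Illustrative sketch (not part of the generated operations module): callers do not
# drive FileOperations.create directly; the public ShareFileClient from the
# azure-storage-file-share package builds these headers on their behalf. A minimal
# create-then-write flow, assuming that package is installed and using a placeholder
# connection string, might look like this (parameter names can differ slightly
# between SDK versions):
from azure.storage.fileshare import ShareFileClient

file_client = ShareFileClient.from_connection_string(
    conn_str="<connection-string>",   # placeholder, not a real value
    share_name="myshare",
    file_path="docs/report.txt",
)
data = b"hello, fileshare"
file_client.create_file(size=len(data))                      # File Create (x-ms-type: file)
file_client.upload_range(data, offset=0, length=len(data))   # File Upload Range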
header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def download( - self, - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - range_get_content_md5=None, # type: Optional[bool] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> IO - """Reads or downloads a file from the system, including its metadata and properties. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param range: Return file data only from the specified byte range. - :type range: str - :param range_get_content_md5: When this header is set to true and specified together with the - Range header, the service returns the MD5 hash for the range, as long as the range is less than - or equal to 4 MB in size. 
- :type range_get_content_md5: bool - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: IO, or the result of cls(response) - :rtype: IO - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[IO] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.download.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if range_get_content_md5 is not None: - header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200, 206]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - if response.status_code == 200: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if response.status_code == 206: - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', 
response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - deserialized = response.stream_download(self._client._pipeline) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all user-defined metadata, standard HTTP properties, and system properties for the - file. It does not return the content of the file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.head(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['x-ms-type']=self._deserialize('str', response.headers.get('x-ms-type')) - response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) - response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) - response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) - response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - 
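# Illustrative sketch (not part of the generated operations module): the headers
# deserialized here surface on the public client as a FileProperties object.
# Assuming the azure-storage-file-share package and the file_client from the
# earlier sketch, properties and a ranged download might be retrieved like this:
props = file_client.get_file_properties()        # HEAD request handled by get_properties
print(props.etag, props.last_modified, props.metadata)

downloader = file_client.download_file(offset=0, length=16)  # ranged GET via download
first_bytes = downloader.readall()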
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) - response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) - response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def delete( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """removes the file from the storage account. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def set_http_headers( - self, - timeout=None, # type: Optional[int] - file_content_length=None, # type: Optional[int] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - file_attributes="none", # type: str - file_creation_time="now", # type: str - file_last_write_time="now", # type: str - file_http_headers=None, # type: Optional["_models.FileHTTPHeaders"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets HTTP headers on the file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_content_length: Resizes a file to the specified size. If the specified byte value - is less than the current size of the file, then all ranges above the specified byte value are - cleared. - :type file_content_length: long - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. 
This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param file_attributes: If specified, the provided file attributes shall be set. Default value: - ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. - :type file_attributes: str - :param file_creation_time: Creation time for the file/directory. Default value: Now. - :type file_creation_time: str - :param file_last_write_time: Last write time for the file/directory. Default value: Now. - :type file_last_write_time: str - :param file_http_headers: Parameter group. - :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_content_type = None - _file_content_encoding = None - _file_content_language = None - _file_cache_control = None - _file_content_md5 = None - _file_content_disposition = None - _lease_id = None - if file_http_headers is not None: - _file_content_type = file_http_headers.file_content_type - _file_content_encoding = file_http_headers.file_content_encoding - _file_content_language = file_http_headers.file_content_language - _file_cache_control = file_http_headers.file_cache_control - _file_content_md5 = file_http_headers.file_content_md5 - _file_content_disposition = file_http_headers.file_content_disposition - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_http_headers.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if file_content_length is not None: - header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') - if _file_content_type is not None: - header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') - if _file_content_encoding is not None: - 
header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') - if _file_content_language is not None: - header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') - if _file_cache_control is not None: - header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') - if _file_content_md5 is not None: - header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') - if _file_content_disposition is not None: - header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) 
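# Illustrative sketch (not part of the generated operations module): the
# x-ms-content-* headers built above correspond to the ContentSettings model on the
# public client. Assuming azure-storage-file-share and the file_client from the
# earlier sketch, the same operation is typically invoked as:
from azure.storage.fileshare import ContentSettings

file_client.set_http_headers(
    content_settings=ContentSettings(
        content_type="text/plain",
        cache_control="no-cache",
    )
)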
- - if cls: - return cls(pipeline_response, None, response_headers) - - set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Updates user-defined metadata for the specified file. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - 
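# Illustrative sketch (not part of the generated operations module): user-defined
# metadata is transmitted as x-ms-meta-* headers. On the public ShareFileClient
# (assumed installed, file_client as in the earlier sketches), the equivalent call
# takes a plain dict:
file_client.set_file_metadata({"category": "reports", "owner": "team-a"})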
set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """[Update] The Lease File operation establishes and manages a lock on a file for write and delete - operations. 
- - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - 
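# Illustrative sketch (not part of the generated operations module): the
# acquire/release/change/break operations above map onto ShareLeaseClient on the
# public surface. Assuming azure-storage-file-share (12.4+, where file leases were
# introduced) and the file_client from the earlier sketches, a simple
# lock-modify-unlock flow is:
lease = file_client.acquire_lease()                              # file leases are infinite by default
file_client.set_file_metadata({"locked": "true"}, lease=lease)   # pass the lease on mutating calls
lease.release()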
break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def upload_range( - self, - range, # type: str - content_length, # type: int - timeout=None, # type: Optional[int] - file_range_write="update", # type: Union[str, "_models.FileRangeWriteType"] - content_md5=None, # type: Optional[bytearray] - optionalbody=None, # type: Optional[IO] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Upload a range of bytes to a file. - - :param range: Specifies the range of bytes to be written. Both the start and end of the range - must be specified. For an update operation, the range can be up to 4 MB in size. For a clear - operation, the range can be up to the value of the file's full size. The File service accepts - only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be - specified in the following format: bytes=startByte-endByte. - :type range: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param file_range_write: Specify one of the following options: - Update: Writes the bytes - specified by the request body into the specified range. The Range and Content-Length headers - must match to perform the update. - Clear: Clears the specified range and releases the space - used in storage for that range. To clear a range, set the Content-Length header to zero, and - set the Range header to a value that indicates the range to clear, up to maximum file size. - :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType - :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of - the data during transport. When the Content-MD5 header is specified, the File service compares - the hash of the content that has arrived with the header value that was sent. If the two hashes - do not match, the operation will fail with error code 400 (Bad Request). - :type content_md5: bytearray - :param optionalbody: Initial data. - :type optionalbody: IO - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "range" - content_type = kwargs.pop("content_type", "application/octet-stream") - accept = "application/xml" - - # Construct URL - url = self.upload_range.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if content_md5 is not None: - header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content_kwargs['stream_content'] = optionalbody - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, 
response_headers) - - upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def upload_range_from_url( - self, - range, # type: str - copy_source, # type: str - content_length, # type: int - timeout=None, # type: Optional[int] - source_range=None, # type: Optional[str] - source_content_crc64=None, # type: Optional[bytearray] - copy_source_authorization=None, # type: Optional[str] - source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Upload a range of bytes to a file where the contents are read from a URL. - - :param range: Writes data to the specified byte range in the file. - :type range: str - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param content_length: Specifies the number of bytes being transmitted in the request body. - When the x-ms-write header is set to clear, the value of this header must be set to zero. - :type content_length: long - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param source_range: Bytes of source data in the specified range. - :type source_range: str - :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be - read from the copy source. - :type source_content_crc64: bytearray - :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid - OAuth access token to copy source. - :type copy_source_authorization: str - :param source_modified_access_conditions: Parameter group. - :type source_modified_access_conditions: ~azure.storage.fileshare.models.SourceModifiedAccessConditions - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_if_match_crc64 = None - _source_if_none_match_crc64 = None - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - if source_modified_access_conditions is not None: - _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 - _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 - comp = "range" - accept = "application/xml" - - # Construct URL - url = self.upload_range_from_url.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if source_range is not None: - header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') - header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - if source_content_crc64 is not None: - header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') - if _source_if_match_crc64 is not None: - header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", _source_if_match_crc64, 'bytearray') - if _source_if_none_match_crc64 is not None: - header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", _source_if_none_match_crc64, 'bytearray') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if copy_source_authorization is not None: - header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - - if cls: - return cls(pipeline_response, None, response_headers) - - upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def get_range_list( - self, - sharesnapshot=None, # type: Optional[str] - prevsharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - range=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ShareFileRangeList" - """Returns the list of valid ranges for a file. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, - when present, specifies the previous snapshot. - :type prevsharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param range: Specifies the range of bytes over which to list ranges, inclusively. - :type range: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareFileRangeList, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareFileRangeList - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareFileRangeList"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "rangelist" - accept = "application/xml" - - # Construct URL - url = self.get_range_list.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if prevsharesnapshot is not None: - query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if range is not None: - header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['x-ms-content-length']=self._deserialize('long', response.headers.get('x-ms-content-length')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareFileRangeList', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def start_copy( - self, - copy_source, # type: str - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - 
file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - copy_file_smb_info=None, # type: Optional["_models.CopyFileSmbInfo"] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Copies a blob or file to a destination file within the storage account. - - :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy - a file to another file within the same storage account, you may use Shared Key to authenticate - the source file. If you are copying a file from another storage account, or if you are copying - a blob from the same storage account or another storage account, then you must authenticate the - source file or blob using a shared access signature. If the source is a public blob, no - authentication is required to perform the copy operation. A file in a share snapshot can also - be specified as a copy source. - :type copy_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param copy_file_smb_info: Parameter group. - :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _file_permission_copy_mode = None - _ignore_read_only = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - _set_archive_attribute = None - _lease_id = None - if copy_file_smb_info is not None: - _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode - _ignore_read_only = copy_file_smb_info.ignore_read_only - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - _set_archive_attribute = copy_file_smb_info.set_archive_attribute - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - accept = "application/xml" - - # Construct URL - url = self.start_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if _file_permission_copy_mode is not None: - header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", _file_permission_copy_mode, 'str') - if _ignore_read_only is not None: - header_parameters['x-ms-file-copy-ignore-readonly'] = self._serialize.header("ignore_read_only", _ignore_read_only, 'bool') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if _set_archive_attribute is not None: - header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", _set_archive_attribute, 'bool') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - 
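        # The optional CopyFileSmbInfo group (unpacked above) drives the x-ms-file-* copy
        # headers: how the permission is carried over (x-ms-file-permission-copy-mode),
        # whether an existing read-only destination is ignored (x-ms-file-copy-ignore-readonly),
        # and the attributes, creation time, last-write time and archive flag to apply to the
        # destination. x-ms-lease-id is sent only when lease access conditions were supplied.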
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) - response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) - - if cls: - return cls(pipeline_response, None, response_headers) - - start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def abort_copy( - self, - copy_id, # type: str - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Aborts a pending Copy File operation, and leaves a destination file with zero length and full - metadata. - - :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy - File operation. - :type copy_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "copy" - copy_action_abort_constant = "abort" - accept = "application/xml" - - # Construct URL - url = self.abort_copy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def list_handles( - self, - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListHandlesResponse" - """Lists handles for file. - - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. 
If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListHandlesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListHandlesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "listhandles" - accept = "application/xml" - - # Construct URL - url = self.list_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ListHandlesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def force_close_handles( - self, - handle_id, # type: str - timeout=None, # type: Optional[int] - marker=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - **kwargs # type: Any 
- ): - # type: (...) -> None - """Closes all handles open for given file. - - :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk - (‘*’) is a wildcard that specifies all handles. - :type handle_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "forceclosehandles" - accept = "application/xml" - - # Construct URL - url = self.force_close_handles.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) - response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', 
response.headers.get('x-ms-number-of-handles-closed')) - response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) - - if cls: - return cls(pipeline_response, None, response_headers) - - force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore - - def rename( - self, - rename_source, # type: str - timeout=None, # type: Optional[int] - replace_if_exists=None, # type: Optional[bool] - ignore_read_only=None, # type: Optional[bool] - file_permission="inherit", # type: Optional[str] - file_permission_key=None, # type: Optional[str] - metadata=None, # type: Optional[str] - source_lease_access_conditions=None, # type: Optional["_models.SourceLeaseAccessConditions"] - destination_lease_access_conditions=None, # type: Optional["_models.DestinationLeaseAccessConditions"] - copy_file_smb_info=None, # type: Optional["_models.CopyFileSmbInfo"] - **kwargs # type: Any - ): - # type: (...) -> None - """Renames a file. - - :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in - length. - :type rename_source: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param replace_if_exists: Optional. A boolean value for if the destination file already exists, - whether this request will overwrite the file or not. If true, the rename will succeed and will - overwrite the destination file. If not provided or if false and the destination file does - exist, the request will not overwrite the destination file. If provided and the destination - file doesn’t exist, the rename will succeed. Note: This value does not override the - x-ms-file-copy-ignore-read-only header value. - :type replace_if_exists: bool - :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly - attribute on a preexisting destination file should be respected. If true, the rename will - succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will - cause the rename to fail. - :type ignore_read_only: bool - :param file_permission: If specified the permission (security descriptor) shall be set for the - directory/file. This header can be used if Permission size is <= 8KB, else - x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type file_permission: str - :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only - one of the x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission_key: str - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param source_lease_access_conditions: Parameter group. - :type source_lease_access_conditions: ~azure.storage.fileshare.models.SourceLeaseAccessConditions - :param destination_lease_access_conditions: Parameter group. - :type destination_lease_access_conditions: ~azure.storage.fileshare.models.DestinationLeaseAccessConditions - :param copy_file_smb_info: Parameter group. 
- :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _source_lease_id = None - _destination_lease_id = None - _file_attributes = None - _file_creation_time = None - _file_last_write_time = None - if copy_file_smb_info is not None: - _file_attributes = copy_file_smb_info.file_attributes - _file_creation_time = copy_file_smb_info.file_creation_time - _file_last_write_time = copy_file_smb_info.file_last_write_time - if destination_lease_access_conditions is not None: - _destination_lease_id = destination_lease_access_conditions.destination_lease_id - if source_lease_access_conditions is not None: - _source_lease_id = source_lease_access_conditions.source_lease_id - comp = "rename" - accept = "application/xml" - - # Construct URL - url = self.rename.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['x-ms-file-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') - if replace_if_exists is not None: - header_parameters['x-ms-file-rename-replace-if-exists'] = self._serialize.header("replace_if_exists", replace_if_exists, 'bool') - if ignore_read_only is not None: - header_parameters['x-ms-file-rename-ignore-readonly'] = self._serialize.header("ignore_read_only", ignore_read_only, 'bool') - if _source_lease_id is not None: - header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", _source_lease_id, 'str') - if _destination_lease_id is not None: - header_parameters['x-ms-destination-lease-id'] = self._serialize.header("destination_lease_id", _destination_lease_id, 'str') - if _file_attributes is not None: - header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') - if _file_creation_time is not None: - header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') - if _file_last_write_time is not None: - header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') - if file_permission is not None: - header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') - if file_permission_key is not None: - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['Accept'] 
= self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) - response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) - response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) - response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) - response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) - response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) - - if cls: - return cls(pipeline_response, None, response_headers) - - rename.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_service_operations.py deleted file mode 100644 index aaf67d3..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_service_operations.py +++ /dev/null @@ -1,276 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. 
import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ServiceOperations(object): - """ServiceOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def set_properties( - self, - storage_service_properties, # type: "_models.StorageServiceProperties" - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for a storage account's File service endpoint, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, 
header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/'} # type: ignore - - def get_properties( - self, - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.StorageServiceProperties" - """Gets the properties of a storage account's File service, including properties for Storage - Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "service" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('StorageServiceProperties', pipeline_response) - - if cls: - return 
cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_properties.metadata = {'url': '/'} # type: ignore - - def list_shares_segment( - self, - prefix=None, # type: Optional[str] - marker=None, # type: Optional[str] - maxresults=None, # type: Optional[int] - include=None, # type: Optional[List[Union[str, "_models.ListSharesIncludeType"]]] - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.ListSharesResponse" - """The List Shares Segment operation returns a list of the shares and share snapshots under the - specified account. - - :param prefix: Filters the results to return only entries whose name begins with the specified - prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list to be returned with the - next list operation. The operation returns a marker value within the response body if the list - returned was not complete. The marker value may then be used in a subsequent call to request - the next set of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of entries to return. If the request does not - specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 - items. - :type maxresults: int - :param include: Include this parameter to specify one or more datasets to include in the - response. - :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ListSharesResponse, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ListSharesResponse"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "list" - accept = "application/xml" - - # Construct URL - url = self.list_shares_segment.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, 
query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - deserialized = self._deserialize('ListSharesResponse', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - list_shares_segment.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_share_operations.py deleted file mode 100644 index 0f93993..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_generated/operations/_share_operations.py +++ /dev/null @@ -1,1507 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import TYPE_CHECKING -import warnings - -from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpRequest, HttpResponse - -from .. import models as _models - -if TYPE_CHECKING: - # pylint: disable=unused-import,ungrouped-imports - from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union - - T = TypeVar('T') - ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -class ShareOperations(object): - """ShareOperations operations. - - You should not instantiate this class directly. Instead, you should create a Client instance that - instantiates it for you and attaches it as an attribute. - - :ivar models: Alias to model classes used in this operation group. - :type models: ~azure.storage.fileshare.models - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - """ - - models = _models - - def __init__(self, client, config, serializer, deserializer): - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self._config = config - - def create( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - quota=None, # type: Optional[int] - access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]] - enabled_protocols=None, # type: Optional[str] - root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]] - **kwargs # type: Any - ): - # type: (...) 
-> None - """Creates a new share under the specified account. If the share with the same name already - exists, the operation fails. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. - :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param enabled_protocols: Protocols to enable on the share. - :type enabled_protocols: str - :param root_squash: Root squash to set on the share. Only valid for NFS shares. - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.create.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if enabled_protocols is not None: - header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create.metadata = {'url': '/{shareName}'} # type: ignore - - def get_properties( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Returns all user-defined metadata and system properties for the specified share or share - snapshot. The data returned does not include the share's list of files. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.get_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota')) - response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')) - response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')) - response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')) - response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')) - response_headers['x-ms-share-provisioned-bandwidth-mibps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-bandwidth-mibps')) - response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) - response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) - response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) - response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) - response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) - response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')) - response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols')) - response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash')) - - if cls: - return cls(pipeline_response, None, response_headers) - - get_properties.metadata = {'url': '/{shareName}'} # type: ignore - - def delete( - self, - sharesnapshot=None, # type: Optional[str] - timeout=None, # type: Optional[int] - delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Operation marks the specified share or share snapshot for deletion. The share or share snapshot - and any files contained within it are later deleted during garbage collection. - - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param delete_snapshots: Specifies the option include to delete the base share and all of its - snapshots. - :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.delete.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if delete_snapshots is not None: - header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.delete(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - delete.metadata = {'url': '/{shareName}'} # type: ignore - - def acquire_lease( - self, - timeout=None, # type: Optional[int] - duration=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a - lease that never expires. 
A non-infinite lease can be between 15 and 60 seconds. A lease - duration cannot be changed using renew or change. - :type duration: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "acquire" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.acquire_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if duration is not None: - header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def release_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "release" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.release_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = 
self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - release_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def change_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - proposed_lease_id=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns - 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid - Constructor (String) for a list of valid GUID string formats. - :type proposed_lease_id: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. 
- :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "change" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.change_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - if proposed_lease_id is not None: - header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - change_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def renew_lease( - self, - lease_id, # type: str - timeout=None, # type: Optional[int] - sharesnapshot=None, # type: Optional[str] - request_id_parameter=None, # type: Optional[str] - 
**kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param lease_id: Specifies the current lease ID on the resource. - :type lease_id: str - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - comp = "lease" - action = "renew" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.renew_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - renew_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def break_lease( - self, - timeout=None, # type: Optional[int] - break_period=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - sharesnapshot=None, # type: Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot - for set and delete share operations. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param break_period: For a break operation, proposed duration the lease should continue before - it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter - than the time remaining on the lease. If longer, the time remaining on the lease is used. A new - lease will not be available before the break period has expired, but the lease may be held for - longer than the break period. If this header does not appear with a break operation, a - fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease - breaks immediately. - :type break_period: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, - specifies the share snapshot to query. - :type sharesnapshot: str - :param lease_access_conditions: Parameter group. 
- :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - comp = "lease" - action = "break" - restype = "share" - accept = "application/xml" - - # Construct URL - url = self.break_lease.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - if sharesnapshot is not None: - query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') - if break_period is not None: - header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) - response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return 
cls(pipeline_response, None, response_headers) - - break_lease.metadata = {'url': '/{shareName}'} # type: ignore - - def create_snapshot( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Creates a read-only snapshot of a share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "snapshot" - accept = "application/xml" - - # Construct URL - url = self.create_snapshot.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_snapshot.metadata = {'url': '/{shareName}'} # type: ignore - - def create_permission( - self, - share_permission, # type: "_models.SharePermission" - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> None - """Create a permission (a security descriptor). 
- - :param share_permission: A permission (a security descriptor) at the share level. - :type share_permission: ~azure.storage.fileshare.models.SharePermission - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - content_type = kwargs.pop("content_type", "application/json") - accept = "application/xml" - - # Construct URL - url = self.create_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(share_permission, 'SharePermission') - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) - - if cls: - return cls(pipeline_response, None, response_headers) - - create_permission.metadata = {'url': '/{shareName}'} # type: ignore - - def get_permission( - self, - file_permission_key, # type: str - timeout=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> "_models.SharePermission" - """Returns the permission (security descriptor) for a given key. - - :param file_permission_key: Key of the permission to be set for the directory/file. - :type file_permission_key: str - :param timeout: The timeout parameter is expressed in seconds. 
For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :keyword callable cls: A custom type or function that will be passed the direct response - :return: SharePermission, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.SharePermission - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "filepermission" - accept = "application/json" - - # Construct URL - url = self.get_permission.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('SharePermission', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_permission.metadata = {'url': '/{shareName}'} # type: ignore - - def set_properties( - self, - timeout=None, # type: Optional[int] - quota=None, # type: Optional[int] - access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]] - root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets properties for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param quota: Specifies the maximum size of the share, in gigabytes. - :type quota: int - :param access_tier: Specifies the access tier of the share. 
- :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :param root_squash: Root squash to set on the share. Only valid for NFS shares. - :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "properties" - accept = "application/xml" - - # Construct URL - url = self.set_properties.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if quota is not None: - header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) - if access_tier is not None: - header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - if root_squash is not None: - header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_properties.metadata = {'url': '/{shareName}'} # type: ignore - - def set_metadata( - self, - timeout=None, # type: Optional[int] - metadata=None, # type: 
Optional[str] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets one or more user-defined name-value pairs for the specified share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param metadata: A name-value pair to associate with a file storage object. - :type metadata: str - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "metadata" - accept = "application/xml" - - # Construct URL - url = self.set_metadata.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - if metadata is not None: - header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_metadata.metadata = {'url': '/{shareName}'} # type: ignore - - def get_access_policy( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # 
type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> List["_models.SignedIdentifier"] - """Returns information about stored access policies specified on the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: list of SignedIdentifier, or the result of cls(response) - :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - accept = "application/xml" - - # Construct URL - url = self.get_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - def set_access_policy( - self, - timeout=None, # type: Optional[int] - share_acl=None, # type: 
Optional[List["_models.SignedIdentifier"]] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> None - """Sets a stored access policy for use with shared access signatures. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param share_acl: The ACL for the share. - :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "acl" - content_type = kwargs.pop("content_type", "application/xml") - accept = "application/xml" - - # Construct URL - url = self.set_access_policy.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - body_content_kwargs = {} # type: Dict[str, Any] - serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}} - if share_acl is not None: - body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) - else: - body_content = None - body_content_kwargs['content'] = body_content - request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore - - def get_statistics( - self, - timeout=None, # type: Optional[int] - lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] - **kwargs # type: Any - ): - # type: (...) -> "_models.ShareStats" - """Retrieves statistics related to the share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param lease_access_conditions: Parameter group. - :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions - :keyword callable cls: A custom type or function that will be passed the direct response - :return: ShareStats, or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ShareStats - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - - _lease_id = None - if lease_access_conditions is not None: - _lease_id = lease_access_conditions.lease_id - restype = "share" - comp = "stats" - accept = "application/xml" - - # Construct URL - url = self.get_statistics.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if _lease_id is not None: - header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - deserialized = self._deserialize('ShareStats', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, response_headers) - - return deserialized - get_statistics.metadata = {'url': '/{shareName}'} # type: ignore - - def restore( - self, - timeout=None, # type: Optional[int] - request_id_parameter=None, # type: Optional[str] - deleted_share_name=None, # type: Optional[str] - deleted_share_version=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None - """Restores a previously deleted Share. - - :param timeout: The timeout parameter is expressed in seconds. For more information, see - :code:`Setting - Timeouts for File Service Operations.`. - :type timeout: int - :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character - limit that is recorded in the analytics logs when storage analytics logging is enabled. - :type request_id_parameter: str - :param deleted_share_name: Specifies the name of the previously-deleted share. - :type deleted_share_name: str - :param deleted_share_version: Specifies the version of the previously-deleted share. - :type deleted_share_version: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: None, or the result of cls(response) - :rtype: None - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - restype = "share" - comp = "undelete" - accept = "application/xml" - - # Construct URL - url = self.restore.metadata['url'] # type: ignore - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id_parameter is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') - if deleted_share_name is not None: - header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str') - if deleted_share_version is not None: - header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str') - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.put(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.StorageError, response) - raise 
HttpResponseError(response=response, model=error) - - response_headers = {} - response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) - response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) - response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) - response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) - response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) - response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) - - if cls: - return cls(pipeline_response, None, response_headers) - - restore.metadata = {'url': '/{shareName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_lease.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_lease.py deleted file mode 100644 index 7c38145..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_lease.py +++ /dev/null @@ -1,237 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import uuid - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, TypeVar, TYPE_CHECKING -) - -from azure.core.tracing.decorator import distributed_trace -from azure.core.exceptions import HttpResponseError - -from ._shared.response_handlers import return_response_headers, process_storage_error -from ._generated.operations import FileOperations, ShareOperations - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - ShareClient = TypeVar("ShareClient") - - -class ShareLeaseClient(object): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareClient or ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file or share to lease. - :type client: ~azure.storage.fileshare.ShareFileClient or - ~azure.storage.fileshare.ShareClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. 
- """ - def __init__( - self, client, lease_id=None - ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (Union[ShareFileClient, ShareClient], Optional[str]) -> None - self.id = lease_id or str(uuid.uuid4()) - self.last_modified = None - self.etag = None - if hasattr(client, 'file_name'): - self._client = client._client.file # type: ignore # pylint: disable=protected-access - self._snapshot = None - elif hasattr(client, 'share_name'): - self._client = client._client.share - self._snapshot = client.snapshot - else: - raise TypeError("Lease must use ShareFileClient or ShareClient.") - - def __enter__(self): - return self - - def __exit__(self, *args): - self.release() - - @distributed_trace - def acquire(self, **kwargs): - # type: (**Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file or share for write and delete operations. If the file or share does not have an active lease, - the File or Share service creates a lease on the file or share. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file or share does not have an active lease, the File or Share service creates a - lease on the file and returns a new lease ID. - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be - between 15 and 60 seconds. A share lease duration cannot be changed - using renew or change. Default is -1 (infinite share lease). - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - try: - lease_duration = kwargs.pop('lease_duration', -1) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace - def renew(self, **kwargs): - # type: (Any) -> None - """Renews the share lease. - - The share lease can be renewed if the lease ID specified in the - lease client matches that associated with the share. Note that - the lease may be renewed even if it has expired as long as the share - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - if isinstance(self._client, FileOperations): - raise TypeError("Lease renewal operations are only valid for ShareClient.") - try: - response = self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - sharesnapshot=self._snapshot, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. 
The lease may be released if the lease ID specified on the request matches - that associated with the share or file. Releasing the lease allows another client to immediately acquire - the lease for the share or file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File or Share service will raise an error - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace - def break_lease(self, **kwargs): - # type: (Any) -> int - """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int lease_break_period: - This is the proposed duration of seconds that the share lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the share lease. If longer, the time remaining on the share lease is used. - A new share lease will not be available before the break period has - expired, but the share lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration share lease breaks after the remaining share lease - period elapses, and an infinite share lease breaks immediately. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. 
- :rtype: int - """ - try: - lease_break_period = kwargs.pop('lease_break_period', None) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - if isinstance(self._client, ShareOperations): - kwargs['break_period'] = lease_break_period - if isinstance(self._client, FileOperations) and lease_break_period: - raise TypeError("Setting a lease break period is only applicable to Share leases.") - - response = self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_models.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_models.py deleted file mode 100644 index 2b93558..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_models.py +++ /dev/null @@ -1,1020 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from enum import Enum - -from azure.core.paging import PageIterator -from azure.core.exceptions import HttpResponseError -from ._parser import _parse_datetime_from_str -from ._shared.response_handlers import return_context_and_deserialized, process_storage_error -from ._shared.models import DictMixin, get_enum_value -from ._generated.models import Metrics as GeneratedMetrics -from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy -from ._generated.models import CorsRule as GeneratedCorsRule -from ._generated.models import ShareProtocolSettings as GeneratedShareProtocolSettings -from ._generated.models import ShareSmbSettings as GeneratedShareSmbSettings -from ._generated.models import SmbMultichannel as GeneratedSmbMultichannel -from ._generated.models import AccessPolicy as GenAccessPolicy -from ._generated.models import DirectoryItem - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class Metrics(GeneratedMetrics): - """A summary of request statistics grouped by API in hour or minute aggregates - for files. - - All required parameters must be populated in order to send to Azure. - - :keyword str version: The version of Storage Analytics to configure. - :keyword bool enabled: Required. Indicates whether metrics are enabled for the - File service. - :keyword bool include_apis: Indicates whether metrics should generate summary - statistics for called API operations. - :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should - persist. 
- """ - - def __init__(self, **kwargs): - self.version = kwargs.get('version', u'1.0') - self.enabled = kwargs.get('enabled', False) - self.include_apis = kwargs.get('include_apis') - self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - version=generated.version, - enabled=generated.enabled, - include_apis=generated.include_apis, - retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access - ) - - -class RetentionPolicy(GeneratedRetentionPolicy): - """The retention policy which determines how long the associated data should - persist. - - All required parameters must be populated in order to send to Azure. - - :param bool enabled: Required. Indicates whether a retention policy is enabled - for the storage service. - :param int days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted. - """ - - def __init__(self, enabled=False, days=None): - self.enabled = enabled - self.days = days - if self.enabled and (self.days is None): - raise ValueError("If policy is enabled, 'days' must be specified.") - - @classmethod - def _from_generated(cls, generated): - if not generated: - return cls() - return cls( - enabled=generated.enabled, - days=generated.days, - ) - - -class CorsRule(GeneratedCorsRule): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. - - All required parameters must be populated in order to send to Azure. - - :param list(str) allowed_origins: - A list of origin domains that will be allowed via CORS, or "*" to allow - all domains. The list of must contain at least one entry. Limited to 64 - origin domains. Each allowed origin can have up to 256 characters. - :param list(str) allowed_methods: - A list of HTTP methods that are allowed to be executed by the origin. - The list of must contain at least one entry. For Azure Storage, - permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. - :keyword list(str) allowed_headers: - Defaults to an empty list. A list of headers allowed to be part of - the cross-origin request. Limited to 64 defined headers and 2 prefixed - headers. Each header can be up to 256 characters. - :keyword list(str) exposed_headers: - Defaults to an empty list. A list of response headers to expose to CORS - clients. Limited to 64 defined headers and two prefixed headers. Each - header can be up to 256 characters. - :keyword int max_age_in_seconds: - The number of seconds that the client/browser should cache a - preflight response. 
- """ - - def __init__(self, allowed_origins, allowed_methods, **kwargs): - self.allowed_origins = ','.join(allowed_origins) - self.allowed_methods = ','.join(allowed_methods) - self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) - self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) - self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) - - @classmethod - def _from_generated(cls, generated): - return cls( - [generated.allowed_origins], - [generated.allowed_methods], - allowed_headers=[generated.allowed_headers], - exposed_headers=[generated.exposed_headers], - max_age_in_seconds=generated.max_age_in_seconds, - ) - - -class ShareSmbSettings(GeneratedShareSmbSettings): - """ Settings for the SMB protocol. - - :keyword SmbMultichannel multichannel: Sets the multichannel settings. - """ - def __init__(self, **kwargs): - self.multichannel = kwargs.get('multichannel') - if self.multichannel is None: - raise ValueError("The value 'multichannel' must be specified.") - - -class SmbMultichannel(GeneratedSmbMultichannel): - """ Settings for Multichannel. - - :keyword bool enabled: If SMB Multichannel is enabled. - """ - def __init__(self, **kwargs): - self.enabled = kwargs.get('enabled') - if self.enabled is None: - raise ValueError("The value 'enabled' must be specified.") - - -class ShareProtocolSettings(GeneratedShareProtocolSettings): - """Protocol Settings class used by the set and get service properties methods in the share service. - - Contains protocol properties of the share service such as the SMB setting of the share service. - - :keyword SmbSettings smb: Sets SMB settings. - """ - def __init__(self, **kwargs): - self.smb = kwargs.get('smb') - if self.smb is None: - raise ValueError("The value 'smb' must be specified.") - - @classmethod - def _from_generated(cls, generated): - return cls( - smb=generated.smb) - - -class AccessPolicy(GenAccessPolicy): - """Access Policy class used by the set and get acl methods in each service. - - A stored access policy can specify the start time, expiry time, and - permissions for the Shared Access Signatures with which it's associated. - Depending on how you want to control access to your resource, you can - specify all of these parameters within the stored access policy, and omit - them from the URL for the Shared Access Signature. Doing so permits you to - modify the associated signature's behavior at any time, as well as to revoke - it. Or you can specify one or more of the access policy parameters within - the stored access policy, and the others on the URL. Finally, you can - specify all of the parameters on the URL. In this case, you can use the - stored access policy to revoke the signature, but not to modify its behavior. - - Together the Shared Access Signature and the stored access policy must - include all fields required to authenticate the signature. If any required - fields are missing, the request will fail. Likewise, if a field is specified - both in the Shared Access Signature URL and in the stored access policy, the - request will fail with status code 400 (Bad Request). - - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. 
- :type permission: str or ~azure.storage.fileshare.FileSasPermissions or - ~azure.storage.fileshare.ShareSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - """ - def __init__(self, permission=None, expiry=None, start=None): - self.start = start - self.expiry = expiry - self.permission = permission - - -class LeaseProperties(DictMixin): - """File or Share Lease Properties. - - :ivar str status: - The lease status of the file or share. Possible values: locked|unlocked - :ivar str state: - Lease state of the file or share. Possible values: available|leased|expired|breaking|broken - :ivar str duration: - When a file or share is leased, specifies whether the lease is of infinite or fixed duration. - """ - - def __init__(self, **kwargs): - self.status = get_enum_value(kwargs.get('x-ms-lease-status')) - self.state = get_enum_value(kwargs.get('x-ms-lease-state')) - self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) - - @classmethod - def _from_generated(cls, generated): - lease = cls() - lease.status = get_enum_value(generated.properties.lease_status) - lease.state = get_enum_value(generated.properties.lease_state) - lease.duration = get_enum_value(generated.properties.lease_duration) - return lease - - -class ContentSettings(DictMixin): - """Used to store the content settings of a file. - - :param str content_type: - The content type specified for the file. If no content type was - specified, the default content type is application/octet-stream. - :param str content_encoding: - If the content_encoding has previously been set - for the file, that value is stored. - :param str content_language: - If the content_language has previously been set - for the file, that value is stored. - :param str content_disposition: - content_disposition conveys additional information about how to - process the response payload, and also can be used to attach - additional metadata. If content_disposition has previously been set - for the file, that value is stored. - :param str cache_control: - If the cache_control has previously been set for - the file, that value is stored. - :param bytearray content_md5: - If the content_md5 has been set for the file, this response - header is stored so that the client can check for message content - integrity. 
- """ - - def __init__( - self, content_type=None, content_encoding=None, - content_language=None, content_disposition=None, - cache_control=None, content_md5=None, **kwargs): - - self.content_type = content_type or kwargs.get('Content-Type') - self.content_encoding = content_encoding or kwargs.get('Content-Encoding') - self.content_language = content_language or kwargs.get('Content-Language') - self.content_md5 = content_md5 or kwargs.get('Content-MD5') - self.content_disposition = content_disposition or kwargs.get('Content-Disposition') - self.cache_control = cache_control or kwargs.get('Cache-Control') - - @classmethod - def _from_generated(cls, generated): - settings = cls() - settings.content_type = generated.properties.content_type or None - settings.content_encoding = generated.properties.content_encoding or None - settings.content_language = generated.properties.content_language or None - settings.content_md5 = generated.properties.content_md5 or None - settings.content_disposition = generated.properties.content_disposition or None - settings.cache_control = generated.properties.cache_control or None - return settings - - -class ShareProperties(DictMixin): - """Share's properties class. - - :ivar str name: - The name of the share. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the share was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int quota: - The allocated quota. - :ivar str access_tier: - The share's access tier. - :ivar dict metadata: A dict with name_value pairs to associate with the - share as metadata. - :ivar str snapshot: - Snapshot of the share. - :ivar bool deleted: - To indicate if this share is deleted or not. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar datetime deleted: - To indicate the deleted time of the deleted share. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar str version: - To indicate the version of deleted share. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar int remaining_retention_days: - To indicate how many remaining days the deleted share will be kept. - This is a service returned value, and the value will be set when list shared including deleted ones. - :ivar int provisioned_bandwidth: - Provisioned bandwidth in megabits/second. Only applicable to premium file accounts. - :ivar ~azure.storage.fileshare.models.ShareRootSquash or str root_squash: - Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :ivar list(str) protocols: - Indicates the protocols enabled on the share. The protocol can be either SMB or NFS. 
- """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.quota = kwargs.get('x-ms-share-quota') - self.access_tier = kwargs.get('x-ms-access-tier') - self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') - self.metadata = kwargs.get('metadata') - self.snapshot = None - self.deleted = None - self.deleted_time = None - self.version = None - self.remaining_retention_days = None - self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') - self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') - self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') - self.provisioned_bandwidth = kwargs.get('x-ms-share-provisioned-bandwidth-mibps') - self.lease = LeaseProperties(**kwargs) - self.protocols = [protocol.strip() for protocol in kwargs.get('x-ms-enabled-protocols', None).split(',')]\ - if kwargs.get('x-ms-enabled-protocols', None) else None - self.root_squash = kwargs.get('x-ms-root-squash', None) - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.last_modified = generated.properties.last_modified - props.etag = generated.properties.etag - props.quota = generated.properties.quota - props.access_tier = generated.properties.access_tier - props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time - props.metadata = generated.metadata - props.snapshot = generated.snapshot - props.deleted = generated.deleted - props.deleted_time = generated.properties.deleted_time - props.version = generated.version - props.remaining_retention_days = generated.properties.remaining_retention_days - props.provisioned_egress_mbps = generated.properties.provisioned_egress_m_bps - props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_m_bps - props.provisioned_iops = generated.properties.provisioned_iops - props.provisioned_bandwidth = generated.properties.provisioned_bandwidth_mi_bps - props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access - props.protocols = [protocol.strip() for protocol in generated.properties.enabled_protocols.split(',')]\ - if generated.properties.enabled_protocols else None - props.root_squash = generated.properties.root_squash - - return props - - -class SharePropertiesPaged(PageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - prefix=self.prefix, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class Handle(DictMixin): - """A listed Azure Storage handle item. - - All required parameters must be populated in order to send to Azure. - - :keyword str handle_id: Required. XSMB service handle ID - :keyword str path: Required. File or directory name including full path starting - from share root - :keyword str file_id: Required. FileId uniquely identifies the file or - directory. - :keyword str parent_id: ParentId uniquely identifies the parent directory of the - object. - :keyword str session_id: Required. SMB session ID in context of which the file - handle was opened - :keyword str client_ip: Required. Client IP that opened the handle - :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened - the handle has last been reconnected. (UTC) - :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('handle_id') - self.path = kwargs.get('path') - self.file_id = kwargs.get('file_id') - self.parent_id = kwargs.get('parent_id') - self.session_id = kwargs.get('session_id') - self.client_ip = kwargs.get('client_ip') - self.open_time = kwargs.get('open_time') - self.last_reconnect_time = kwargs.get('last_reconnect_time') - - @classmethod - def _from_generated(cls, generated): - handle = cls() - handle.id = generated.handle_id - handle.path = generated.path - handle.file_id = generated.file_id - handle.parent_id = generated.parent_id - handle.session_id = generated.session_id - handle.client_ip = generated.client_ip - handle.open_time = generated.open_time - handle.last_reconnect_time = generated.last_reconnect_time - return handle - - -class HandlesPaged(PageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. 
- :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryProperties(DictMixin): - """Directory's properties class. - - :ivar str name: - The name of the directory. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the directory was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar bool server_encrypted: - Whether encryption is enabled. - :keyword dict metadata: A dict with name_value pairs to associate with the - directory as metadata. - :ivar change_time: Change time for the file. - :vartype change_time: str or ~datetime.datetime - :ivar creation_time: Creation time for the file. - :vartype creation_time: str or ~datetime.datetime - :ivar last_write_time: Last write time for the file. - :vartype last_write_time: str or ~datetime.datetime - :ivar last_access_time: Last access time for the file. - :vartype last_access_time: ~datetime.datetime - :ivar file_attributes: - The file system attributes for files and directories. - :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :ivar permission_key: Key of the permission to be set for the - directory/file. - :vartype permission_key: str - :ivar file_id: Required. FileId uniquely identifies the file or - directory. - :vartype file_id: str - :ivar parent_id: ParentId uniquely identifies the parent directory of the - object. 
- :vartype parent_id: str - """ - - def __init__(self, **kwargs): - self.name = None - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.metadata = kwargs.get('metadata') - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.last_access_time = None - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - self.is_directory = True - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.file_id = generated.file_id - props.file_attributes = generated.attributes - props.last_modified = generated.properties.last_modified - props.creation_time = generated.properties.creation_time - props.last_access_time = generated.properties.last_access_time - props.last_write_time = generated.properties.last_write_time - props.change_time = generated.properties.change_time - props.etag = generated.properties.etag - props.permission_key = generated.permission_key - return props - - -class DirectoryPropertiesPaged(PageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - def _get_next_cb(self, continuation_token): - try: - return self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access - self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access - return self._response.next_marker or None, self.current_page - - -class FileProperties(DictMixin): - """File's properties class. - - :ivar str name: - The name of the file. - :ivar str path: - The path of the file. - :ivar str share: - The name of share. - :ivar str snapshot: - File snapshot. - :ivar int content_length: - Size of file in bytes. - :ivar dict metadata: A dict with name_value pairs to associate with the - file as metadata. - :ivar str file_type: - Type of the file. - :ivar ~datetime.datetime last_modified: - A datetime object representing the last time the file was modified. - :ivar str etag: - The ETag contains a value that you can use to perform operations - conditionally. - :ivar int size: - Size of file in bytes. - :ivar str content_range: - The range of bytes. - :ivar bool server_encrypted: - Whether encryption is enabled. - :ivar copy: - The copy properties. - :vartype copy: ~azure.storage.fileshare.CopyProperties - :ivar content_settings: - The content settings for the file. 
- :vartype content_settings: ~azure.storage.fileshare.ContentSettings - """ - - def __init__(self, **kwargs): - self.name = kwargs.get('name') - self.path = None - self.share = None - self.snapshot = None - self.content_length = kwargs.get('Content-Length') - self.metadata = kwargs.get('metadata') - self.file_type = kwargs.get('x-ms-type') - self.last_modified = kwargs.get('Last-Modified') - self.etag = kwargs.get('ETag') - self.size = kwargs.get('Content-Length') - self.content_range = kwargs.get('Content-Range') - self.server_encrypted = kwargs.get('x-ms-server-encrypted') - self.copy = CopyProperties(**kwargs) - self.content_settings = ContentSettings(**kwargs) - self.lease = LeaseProperties(**kwargs) - self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) - self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) - self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) - self.last_access_time = None - self.file_attributes = kwargs.get('x-ms-file-attributes') - self.permission_key = kwargs.get('x-ms-file-permission-key') - self.file_id = kwargs.get('x-ms-file-id') - self.parent_id = kwargs.get('x-ms-file-parent-id') - self.is_directory = False - - @classmethod - def _from_generated(cls, generated): - props = cls() - props.name = generated.name - props.file_id = generated.file_id - props.etag = generated.properties.etag - props.file_attributes = generated.attributes - props.last_modified = generated.properties.last_modified - props.creation_time = generated.properties.creation_time - props.last_access_time = generated.properties.last_access_time - props.last_write_time = generated.properties.last_write_time - props.change_time = generated.properties.change_time - props.size = generated.properties.content_length - props.permission_key = generated.permission_key - return props - - -class ShareProtocols(str, Enum): - """Enabled protocols on the share""" - SMB = "SMB" - NFS = "NFS" - - -class CopyProperties(DictMixin): - """File Copy Properties. - - :ivar str id: - String identifier for the last attempted Copy File operation where this file - was the destination file. This header does not appear if this file has never - been the destination in a Copy File operation, or if this file has been - modified after a concluded Copy File operation. - :ivar str source: - URL up to 2 KB in length that specifies the source file used in the last attempted - Copy File operation where this file was the destination file. This header does not - appear if this file has never been the destination in a Copy File operation, or if - this file has been modified after a concluded Copy File operation. - :ivar str status: - State of the copy operation identified by Copy ID, with these values: - success: - Copy completed successfully. - pending: - Copy is in progress. Check copy_status_description if intermittent, - non-fatal errors impede copy progress but don't cause failure. - aborted: - Copy was ended by Abort Copy File. - failed: - Copy failed. See copy_status_description for failure details. - :ivar str progress: - Contains the number of bytes copied and the total bytes in the source in the last - attempted Copy File operation where this file was the destination file. Can show - between 0 and Content-Length bytes copied. - :ivar datetime completion_time: - Conclusion time of the last attempted Copy File operation where this file was the - destination file. 
This value can specify the time of a completed, aborted, or - failed copy attempt. - :ivar str status_description: - Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal - or non-fatal copy operation failure. - :ivar bool incremental_copy: - Copies the snapshot of the source file to a destination file. - The snapshot is copied such that only the differential changes between - the previously copied snapshot are transferred to the destination - :ivar datetime destination_snapshot: - Included if the file is incremental copy or incremental copy snapshot, - if x-ms-copy-status is success. Snapshot time of the last successful - incremental copy snapshot for this file. - """ - - def __init__(self, **kwargs): - self.id = kwargs.get('x-ms-copy-id') - self.source = kwargs.get('x-ms-copy-source') - self.status = get_enum_value(kwargs.get('x-ms-copy-status')) - self.progress = kwargs.get('x-ms-copy-progress') - self.completion_time = kwargs.get('x-ms-copy-completion_time') - self.status_description = kwargs.get('x-ms-copy-status-description') - self.incremental_copy = kwargs.get('x-ms-incremental-copy') - self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') - - @classmethod - def _from_generated(cls, generated): - copy = cls() - copy.id = generated.properties.copy_id or None - copy.status = get_enum_value(generated.properties.copy_status) or None - copy.source = generated.properties.copy_source or None - copy.progress = generated.properties.copy_progress or None - copy.completion_time = generated.properties.copy_completion_time or None - copy.status_description = generated.properties.copy_status_description or None - copy.incremental_copy = generated.properties.incremental_copy or None - copy.destination_snapshot = generated.properties.destination_snapshot or None - return copy - - -class FileSasPermissions(object): - """FileSasPermissions class to be used with - generating shared access signature operations. - - :param bool read: - Read the content, properties, metadata. Use the file as the source of a copy - operation. - :param bool create: - Create a new file or copy a file to a new file. - :param bool write: - Create or write content, properties, metadata. Resize the file. Use the file - as the destination of a copy operation within the same account. - :param bool delete: - Delete the file. - """ - def __init__(self, read=False, create=False, write=False, delete=False): - self.read = read - self.create = create - self.write = write - self.delete = delete - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a FileSasPermissions from a string. - - To specify read, create, write, or delete permissions you need only to - include the first letter of the word in the string. E.g. For read and - create permissions, you would provide a string "rc". 
- - :param str permission: The string which dictates the read, create, - write, or delete permissions - :return: A FileSasPermissions object - :rtype: ~azure.storage.fileshare.FileSasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - - parsed = cls(p_read, p_create, p_write, p_delete) - - return parsed - - -class ShareSasPermissions(object): - """ShareSasPermissions class to be used with - generating shared access signature and access policy operations. - - :param bool read: - Read the content, properties or metadata of any file in the share. Use any - file in the share as the source of a copy operation. - :param bool write: - For any file in the share, create or write content, properties or metadata. - Resize the file. Use the file as the destination of a copy operation within - the same account. - Note: You cannot grant permissions to read or write share properties or - metadata with a service SAS. Use an account SAS instead. - :param bool delete: - Delete any file in the share. - Note: You cannot grant permissions to delete a share with a service SAS. Use - an account SAS instead. - :param bool list: - List files and directories in the share. - :param bool create: - Create a new file in the share, or copy a file to a new file in the share. - """ - def __init__(self, read=False, write=False, delete=False, list=False, create=False): # pylint: disable=redefined-builtin - self.read = read - self.create = create - self.write = write - self.delete = delete - self.list = list - self._str = (('r' if self.read else '') + - ('c' if self.create else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('l' if self.list else '')) - - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create a ShareSasPermissions from a string. - - To specify read, create, write, delete, or list permissions you need only to - include the first letter of the word in the string. E.g. For read and - write permissions, you would provide a string "rw". - - :param str permission: The string which dictates the read, create, write, - delete, or list permissions - :return: A ShareSasPermissions object - :rtype: ~azure.storage.fileshare.ShareSasPermissions - """ - p_read = 'r' in permission - p_create = 'c' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_list = 'l' in permission - - parsed = cls(p_read, p_write, p_delete, p_list, p_create) - - return parsed - -class NTFSAttributes(object): - """ - Valid set of attributes to set for file or directory. - To set attribute for directory, 'Directory' should always be enabled except setting 'None' for directory. 
- - :ivar bool read_only: - Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE - :ivar bool hidden: - Enable/disable 'Hidden' attribute for DIRECTORY or FILE - :ivar bool system: - Enable/disable 'System' attribute for DIRECTORY or FILE - :ivar bool none: - Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY - :ivar bool directory: - Enable/disable 'Directory' attribute for DIRECTORY - :ivar bool archive: - Enable/disable 'Archive' attribute for DIRECTORY or FILE - :ivar bool temporary: - Enable/disable 'Temporary' attribute for FILE - :ivar bool offline: - Enable/disable 'Offline' attribute for DIRECTORY or FILE - :ivar bool not_content_indexed: - Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE - :ivar bool no_scrub_data: - Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE - """ - def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False, - temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False): - - self.read_only = read_only - self.hidden = hidden - self.system = system - self.none = none - self.directory = directory - self.archive = archive - self.temporary = temporary - self.offline = offline - self.not_content_indexed = not_content_indexed - self.no_scrub_data = no_scrub_data - self._str = (('ReadOnly|' if self.read_only else '') + - ('Hidden|' if self.hidden else '') + - ('System|' if self.system else '') + - ('None|' if self.none else '') + - ('Directory|' if self.directory else '') + - ('Archive|' if self.archive else '') + - ('Temporary|' if self.temporary else '') + - ('Offline|' if self.offline else '') + - ('NotContentIndexed|' if self.not_content_indexed else '') + - ('NoScrubData|' if self.no_scrub_data else '')) - - def __str__(self): - concatenated_params = self._str - return concatenated_params.strip('|') - - @classmethod - def from_string(cls, string): - """Create a NTFSAttributes from a string. - - To specify permissions you can pass in a string with the - desired permissions, e.g. "ReadOnly|Hidden|System" - - :param str string: The string which dictates the permissions. - :return: A NTFSAttributes object - :rtype: ~azure.storage.fileshare.NTFSAttributes - """ - read_only = "ReadOnly" in string - hidden = "Hidden" in string - system = "System" in string - none = "None" in string - directory = "Directory" in string - archive = "Archive" in string - temporary = "Temporary" in string - offline = "Offline" in string - not_content_indexed = "NotContentIndexed" in string - no_scrub_data = "NoScrubData" in string - - parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed, - no_scrub_data) - parsed._str = string # pylint: disable = protected-access - return parsed - - -def service_properties_deserialize(generated): - """Deserialize a ServiceProperties objects into a dict. 
- """ - return { - 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access - 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access - 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access - 'protocol': ShareProtocolSettings._from_generated(generated.protocol), # pylint: disable=protected-access - } diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_parser.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_parser.py deleted file mode 100644 index db7cab5..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_parser.py +++ /dev/null @@ -1,42 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import datetime, timedelta - -_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time' -_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else' \ - 'please use file_permission_key' - - -def _get_file_permission(file_permission, file_permission_key, default_permission): - # if file_permission and file_permission_key are both empty, then use the default_permission - # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used - if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024: - raise ValueError(_FILE_PERMISSION_TOO_LONG) - - if not file_permission: - if not file_permission_key: - return default_permission - return None - - if not file_permission_key: - return file_permission - - raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS) - - -def _parse_datetime_from_str(string_datetime): - if not string_datetime: - return None - dt, _, us = string_datetime.partition(".") - dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") - us = int(us[:-2]) # microseconds - datetime_obj = dt + timedelta(microseconds=us) - return datetime_obj - - -def _datetime_to_str(datetime_obj): - return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z' diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_serialize.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_serialize.py deleted file mode 100644 index 0054a44..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_serialize.py +++ /dev/null @@ -1,164 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from azure.core import MatchConditions - -from ._parser import _datetime_to_str, _get_file_permission -from ._generated.models import ( - SourceModifiedAccessConditions, - LeaseAccessConditions, - SourceLeaseAccessConditions, - DestinationLeaseAccessConditions, - CopyFileSmbInfo) - - -_SUPPORTED_API_VERSIONS = [ - '2019-02-02', - '2019-07-07', - '2019-10-10', - '2019-12-12', - '2020-02-10', - '2020-04-08', - '2020-06-12', - '2020-08-04', - '2020-10-02', - '2021-02-12', - '2021-04-10' -] - - -def _get_match_headers(kwargs, match_param, etag_param): - # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) - # TODO: extract this method to shared folder also add some comments, so that share, datalake and blob can use it. - if_match = None - if_none_match = None - match_condition = kwargs.pop(match_param, None) - if match_condition == MatchConditions.IfNotModified: - if_match = kwargs.pop(etag_param, None) - if not if_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfPresent: - if_match = '*' - elif match_condition == MatchConditions.IfModified: - if_none_match = kwargs.pop(etag_param, None) - if not if_none_match: - raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) - elif match_condition == MatchConditions.IfMissing: - if_none_match = '*' - elif match_condition is None: - if etag_param in kwargs: - raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) - else: - raise TypeError("Invalid match condition: {}".format(match_condition)) - return if_match, if_none_match - - -def get_source_conditions(kwargs): - # type: (Dict[str, Any]) -> SourceModifiedAccessConditions - if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') - return SourceModifiedAccessConditions( - source_if_modified_since=kwargs.pop('source_if_modified_since', None), - source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), - source_if_match=if_match or kwargs.pop('source_if_match', None), - source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None) - ) - - -def get_access_conditions(lease): - # type: (ShareLeaseClient or str) -> LeaseAccessConditions or None - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return LeaseAccessConditions(lease_id=lease_id) if lease_id else None - - -def get_source_access_conditions(lease): - # type: (ShareLeaseClient or str) -> SourceLeaseAccessConditions or None - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return SourceLeaseAccessConditions(source_lease_id=lease_id) if lease_id else None - - -def get_dest_access_conditions(lease): - # type: (ShareLeaseClient or str) -> DestinationLeaseAccessConditions or None - try: - lease_id = lease.id # type: ignore - except AttributeError: - lease_id = lease # type: ignore - return DestinationLeaseAccessConditions(destination_lease_id=lease_id) if lease_id else None - - -def get_smb_properties(kwargs): - # type: (Dict[str, Any]) -> Dict[str, Any] - ignore_read_only = kwargs.pop('ignore_read_only', None) - set_archive_attribute = kwargs.pop('set_archive_attribute', None) - file_permission = kwargs.pop('file_permission', None) - file_permission_key = kwargs.pop('permission_key', None) - file_attributes = 
kwargs.pop('file_attributes', None) - file_creation_time = kwargs.pop('file_creation_time', None) or "" - file_last_write_time = kwargs.pop('file_last_write_time', None) or "" - - file_permission_copy_mode = None - file_permission = _get_file_permission(file_permission, file_permission_key, None) - - if file_permission: - if file_permission.lower() == "source": - file_permission = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - elif file_permission_key: - if file_permission_key.lower() == "source": - file_permission_key = None - file_permission_copy_mode = "source" - else: - file_permission_copy_mode = "override" - return { - 'file_permission': file_permission, - 'file_permission_key': file_permission_key, - 'copy_file_smb_info': CopyFileSmbInfo( - file_permission_copy_mode=file_permission_copy_mode, - ignore_read_only=ignore_read_only, - file_attributes=file_attributes, - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - set_archive_attribute=set_archive_attribute - ) - - } - - -def get_rename_smb_properties(kwargs): - # type: (dict[str, Any]) -> dict[str, Any] - file_permission = kwargs.pop('file_permission', None) - file_permission_key = kwargs.pop('permission_key', None) - file_attributes = kwargs.pop('file_attributes', None) - file_creation_time = kwargs.pop('file_creation_time', None) - file_last_write_time = kwargs.pop('file_last_write_time', None) - - file_permission = _get_file_permission(file_permission, file_permission_key, None) - - return { - 'file_permission': file_permission, - 'file_permission_key': file_permission_key, - 'copy_file_smb_info': CopyFileSmbInfo( - file_attributes=file_attributes, - file_creation_time=_datetime_to_str(file_creation_time) if file_creation_time else None, - file_last_write_time=_datetime_to_str(file_last_write_time) if file_last_write_time else None - )} - - -def get_api_version(kwargs): - # type: (Dict[str, Any]) -> str - api_version = kwargs.get('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or _SUPPORTED_API_VERSIONS[-1] diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_share_client.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_share_client.py deleted file mode 100644 index f2189a1..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_share_client.py +++ /dev/null @@ -1,909 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
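``get_api_version`` in the removed ``_serialize.py`` validates an optional ``api_version`` keyword against ``_SUPPORTED_API_VERSIONS`` and otherwise falls back to the newest entry in that list. A small sketch of that behaviour as it stood before this removal (the module path is the one shown in the diff)::

    from azure.multiapi.storagev2.fileshare.v2021_04_10._serialize import get_api_version

    assert get_api_version({}) == '2021-04-10'                        # no override: newest supported version
    assert get_api_version({'api_version': '2019-07-07'}) == '2019-07-07'

    try:
        get_api_version({'api_version': '2030-01-01'})                # unsupported: ValueError listing valid versions
    except ValueError as error:
        print(error)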
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse, quote, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import quote, unquote # type: ignore - -import six -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.request_handlers import add_metadata_headers, serialize_iso -from ._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from ._generated import AzureFileStorage -from ._generated.models import ( - SignedIdentifier, - DeleteSnapshotsOptionType, - SharePermission) -from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission -from ._serialize import get_api_version, get_access_conditions -from ._directory_client import ShareDirectoryClient -from ._file_client import ShareFileClient -from ._lease import ShareLeaseClient -from ._models import ShareProtocols - - -if TYPE_CHECKING: - from ._models import ShareProperties, AccessPolicy - - -class ShareClient(StorageAccountHostsMixin): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not share_name: - raise ValueError("Please specify a share name.") - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File service.") - - path_snapshot = None - path_snapshot, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - try: - self.snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - self.snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - self.snapshot = snapshot or path_snapshot - - self.share_name = share_name - self._query_str, credential = self._format_query_string( - sas_token, credential, share_snapshot=self.snapshot) - super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - @classmethod - def from_share_url(cls, share_url, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """ - :param str share_url: The full URI to the share. - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - """ - try: - if not share_url.lower().startswith('http'): - share_url = "https://" + share_url - except AttributeError: - raise ValueError("Share URL must be a string.") - parsed_url = urlparse(share_url.rstrip('/')) - if not (parsed_url.path and parsed_url.netloc): - raise ValueError("Invalid URL: {}".format(share_url)) - - share_path = parsed_url.path.lstrip('/').split('/') - account_path = "" - if len(share_path) > 1: - account_path = "/" + "/".join(share_path[:-1]) - account_url = "{}://{}{}?{}".format( - parsed_url.scheme, - parsed_url.netloc.rstrip('/'), - account_path, - parsed_url.query) - - share_name = unquote(share_path[-1]) - path_snapshot, _ = parse_query(parsed_url.query) - if snapshot: - try: - path_snapshot = snapshot.snapshot # type: ignore - except AttributeError: - try: - path_snapshot = snapshot['snapshot'] # type: ignore - except TypeError: - path_snapshot = snapshot - - if not share_name: - raise ValueError("Invalid URL. Please provide a URL with a valid share name") - return cls(account_url, share_name, path_snapshot, credential, **kwargs) - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. 
- """ - share_name = self.share_name - if isinstance(share_name, six.text_type): - share_name = share_name.encode('UTF-8') - return "{}://{}/{}{}".format( - self.scheme, - hostname, - quote(share_name), - self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - share_name, # type: str - snapshot=None, # type: Optional[str] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> ShareClient - """Create ShareClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param share_name: The name of the share. - :type share_name: str - :param str snapshot: - The optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A share client. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_client_from_conn_string] - :end-before: [END create_share_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Gets the share client from connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls( - account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, - _location_mode=self._location_mode) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode) - - @distributed_trace - def acquire_lease(self, **kwargs): - # type: (**Any) -> ShareLeaseClient - """Requests a new lease. 
- - If the share does not have an active lease, the Share - Service creates a lease on the share and returns a new lease. - - .. versionadded:: 12.5.0 - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword str lease_id: - Proposed lease ID, in a GUID string format. The Share Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START acquire_and_release_lease_on_share] - :end-before: [END acquire_and_release_lease_on_share] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a share. - """ - kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) - lease_id = kwargs.pop('lease_id', None) - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - lease.acquire(**kwargs) - return lease - - @distributed_trace - def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword protocols: - Protocols to enable on the share. Only one protocol can be enabled on the share. - :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 8 - :caption: Creates a file share. 
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - access_tier = kwargs.pop('access_tier', None) - timeout = kwargs.pop('timeout', None) - root_squash = kwargs.pop('root_squash', None) - protocols = kwargs.pop('protocols', None) - if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: - raise ValueError("The enabled protocol must be set to either SMB or NFS.") - if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: - raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - enabled_protocols=protocols, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 12 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 12 - :caption: Deletes the share and any snapshots. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - delete_snapshots=delete_include, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 12 - :caption: Gets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - props = self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace - def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. - - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 12 - :caption: Sets the share quota. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=None, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_share_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Sets the share properties. - - .. versionadded:: 12.4.0 - - :keyword access_tier: - Specifies the access tier of the share. 
- Possible values: 'TransactionOptimized', 'Hot', and 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :keyword int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_properties] - :end-before: [END set_share_properties] - :language: python - :dedent: 12 - :caption: Sets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - access_tier = kwargs.pop('access_tier', None) - quota = kwargs.pop('quota', None) - root_squash = kwargs.pop('root_squash', None) - if all(parameter is None for parameter in [access_tier, quota, root_squash]): - raise ValueError("set_share_properties should be called with at least one parameter.") - try: - return self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 12 - :caption: Sets the share metadata. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. 
- - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace - def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - try: - return self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :return: The approximate size of the data (in bytes) stored on the share. 
- :rtype: int - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - stats = self._client.share.get_statistics( - timeout=timeout, - lease_access_conditions=access_conditions, - **kwargs) - return stats.share_usage_bytes # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword list[str] include: - Include this parameter to specify one or more datasets to include in the response. - Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword bool include_extended_info: - If this is set to true, file id will be returned in listed results. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 12 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @staticmethod - def _create_permission_for_share_options(file_permission, # type: str - **kwargs): - options = { - 'share_permission': SharePermission(permission=file_permission), - 'cls': deserialize_permission_key, - 'timeout': kwargs.pop('timeout', None), - } - options.update(kwargs) - return options - - @distributed_trace - def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return self._client.share.create_permission(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace - def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - directory = self.get_directory_client(directory_name) - directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_share_service_client.py deleted file mode 100644 index 2b41ed0..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_share_service_client.py +++ /dev/null @@ -1,423 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
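``_share_client.py``, also dropped here, is the per-share client whose docstrings above cover share creation, child clients and share-level permissions. A hedged usage sketch; the connection string, share name, paths and SDDL value are placeholders, and the top-level import mirrors the upstream ``azure.storage.fileshare`` layout::

    from azure.multiapi.storagev2.fileshare.v2021_04_10 import ShareClient

    share = ShareClient.from_connection_string("<connection_string>", share_name="myshare")
    share.create_share(quota=1, access_tier="Hot")

    # Child clients; neither the directory nor the file has to exist yet.
    directory = share.get_directory_client("reports")
    file_client = share.get_file_client("reports/summary.txt")

    # A share-level security descriptor returns a key reusable by files/directories in the share.
    permission_key = share.create_permission_for_share("<portable SDDL>")

    share.delete_share(delete_snapshots=True)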
-# -------------------------------------------------------------------------- - -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Dict, List, - TYPE_CHECKING -) - - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse # type: ignore - -from azure.core.exceptions import HttpResponseError -from azure.core.paging import ItemPaged -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import Pipeline -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query -from ._shared.response_handlers import process_storage_error -from ._generated import AzureFileStorage -from ._generated.models import StorageServiceProperties -from ._share_client import ShareClient -from ._serialize import get_api_version -from ._models import ( - SharePropertiesPaged, - service_properties_deserialize, -) - -if TYPE_CHECKING: - from datetime import datetime - from ._models import ( - ShareProperties, - Metrics, - CorsRule, - ShareProtocolSettings - ) - - -class ShareServiceClient(StorageAccountHostsMixin): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. - For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - For more optional configuration, please click - `here `_. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - try: - if not account_url.lower().startswith('http'): - account_url = "https://" + account_url - except AttributeError: - raise ValueError("Account URL must be a string.") - parsed_url = urlparse(account_url.rstrip('/')) - if not parsed_url.netloc: - raise ValueError("Invalid URL: {}".format(account_url)) - if hasattr(credential, 'get_token'): - raise ValueError("Token credentials not supported by the File Share service.") - - _, sas_token = parse_query(parsed_url.query) - if not sas_token and not credential: - raise ValueError( - 'You need to provide either an account shared key or SAS token when creating a storage service.') - self._query_str, credential = self._format_query_string(sas_token, credential) - super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - - def _format_url(self, hostname): - """Format the endpoint URL according to the current location - mode hostname. - """ - return "{}://{}/{}".format(self.scheme, hostname, self._query_str) - - @classmethod - def from_connection_string( - cls, conn_str, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): # type: (...) -> ShareServiceClient - """Create ShareServiceClient from a Connection String. - - :param str conn_str: - A connection string to an Azure Storage account. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :returns: A File Share service client. - :rtype: ~azure.storage.fileshare.ShareServiceClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START create_share_service_client_from_conn_string] - :end-before: [END create_share_service_client_from_conn_string] - :language: python - :dedent: 8 - :caption: Create the share service client with connection string. - """ - account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') - if 'secondary_hostname' not in kwargs: - kwargs['secondary_hostname'] = secondary - return cls(account_url, credential=credential, **kwargs) - - @distributed_trace - def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 8 - :caption: Get file share service properties. 
- """ - timeout = kwargs.pop('timeout', None) - try: - service_props = self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - protocol=None, # type: Optional[ShareProtocolSettings], - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :param protocol: - Sets protocol settings - :type protocol: ~azure.storage.fileshare.ShareProtocolSettings - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. - """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - protocol=protocol - ) - try: - self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> ItemPaged[ShareProperties] - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 12 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return ItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace - def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 8 - :caption: Create a share in the file share service. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace - def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 12 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace - def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. 
- - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.ShareClient - """ - share = self.get_share_client(deleted_share_name) - - try: - share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except HttpResponseError as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. - """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = Pipeline( - transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, - _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
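The ``ShareServiceClient`` removed above is the account-level entry point for the File Share data plane (service properties, list/create/delete/undelete shares, ``get_share_client``). A minimal usage sketch, assuming the retained ``v2021_06_08`` sub-package exports the same client as upstream ``azure-storage-file-share``; the account URL, credential, and share name are placeholders:

.. code-block:: python

    # Sketch only: account URL, credential, and share name are placeholders.
    from azure.multiapi.storagev2.fileshare.v2021_06_08 import ShareServiceClient

    service = ShareServiceClient(
        account_url="https://<account>.file.core.windows.net",
        credential="<account-key-or-sas-token>",
    )

    # Create a share, list shares (with metadata), then delete it.
    share = service.create_share("myshare", metadata={"Category": "test"})
    for props in service.list_shares(include_metadata=True):
        print(props.name, props.metadata)
    service.delete_share("myshare")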
-# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
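The ``_shared/__init__.py`` helpers deleted above perform the HMAC-SHA256 signing used by Shared Key authorization: decode the base64 account key, HMAC the UTF-8 string-to-sign with SHA-256, and base64-encode the digest. A standalone sketch of the same computation; the key and string-to-sign below are dummies:

.. code-block:: python

    import base64
    import hashlib
    import hmac

    def sign_string(key, string_to_sign, key_is_base64=True):
        # Decode the base64 account key, HMAC-SHA256 the string, return a base64 digest.
        key_bytes = base64.b64decode(key) if key_is_base64 else key.encode("utf-8")
        digest = hmac.new(key_bytes, string_to_sign.encode("utf-8"), hashlib.sha256).digest()
        return base64.b64encode(digest).decode("utf-8")

    # Dummy values, for illustration only.
    dummy_key = base64.b64encode(b"not-a-real-account-key").decode("utf-8")
    print(sign_string(dummy_key, "GET\n\n/myaccount/myshare"))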
- """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/base_client.py deleted file mode 100644 index f8fae9e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/base_client.py +++ /dev/null @@ -1,462 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self.require_encryption = kwargs.get("require_encryption", False) - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. 
- - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". - - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - if service == "dfs": - primary = primary.replace(".blob.", ".dfs.") - secondary = secondary.replace(".blob.", ".dfs.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - 
config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/base_client_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/base_client_async.py deleted file mode 100644 index 091c350..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/base_client_async.py +++ /dev/null @@ -1,183 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
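``parse_connection_str``, ``parse_query``, and ``is_credential_sastoken`` above derive endpoints and credentials from a standard storage connection string and detect when a plain string credential is really a SAS token. A hedged sketch of the expected connection string shape and of the same SAS-detection idea; all values are placeholders:

.. code-block:: python

    from urllib.parse import parse_qs

    # Placeholder connection string in the key=value;key=value shape expected above.
    conn_str = (
        "DefaultEndpointsProtocol=https;"
        "AccountName=myaccount;"
        "AccountKey=bXktZHVtbXkta2V5;"
        "EndpointSuffix=core.windows.net"
    )
    settings = dict(part.split("=", 1) for part in conn_str.rstrip(";").split(";"))
    primary = "{}://{}.file.{}".format(
        settings["DefaultEndpointsProtocol"], settings["AccountName"], settings["EndpointSuffix"]
    )
    print(primary)  # https://myaccount.file.core.windows.net

    # Same idea as is_credential_sastoken(): if every query parameter name in the
    # credential string is a known SAS parameter, treat it as a SAS token.
    candidate = "?sv=2021-06-08&ss=f&srt=sco&sp=rl&sig=placeholder"
    sas_names = {"sv", "ss", "srt", "sp", "sig", "se", "st", "spr"}
    print(all(name in sas_names for name in parse_qs(candidate.lstrip("?"))))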
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/constants.py deleted file mode 100644 index 66f9a47..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/constants.py +++ /dev/null @@ -1,26 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
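The async base client above mirrors the synchronous pipeline but must be used with ``async with`` and the ``aiohttp`` transport. A minimal async sketch, assuming the vendored package keeps the upstream ``.aio`` namespace (as ``azure-storage-file-share`` does); the URL and credential are placeholders:

.. code-block:: python

    import asyncio

    # Assumption: the retained version sub-package mirrors the upstream .aio layout.
    from azure.multiapi.storagev2.fileshare.v2021_06_08.aio import ShareServiceClient

    async def main():
        # Async clients only support "async with", which opens and closes
        # the aiohttp transport for you.
        async with ShareServiceClient(
            account_url="https://<account>.file.core.windows.net",
            credential="<account-key-or-sas-token>",
        ) as service:
            async for share in service.list_shares():
                print(share.name)

    asyncio.run(main())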
-# -------------------------------------------------------------------------- - -import sys -from .._generated import AzureFileStorage - - -X_MS_VERSION = AzureFileStorage(url="get_api_version")._config.version # pylint: disable=protected-access - -# Socket timeout in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 20 - -# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) -# The socket timeout is now the maximum total duration to send all data. -if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/encryption.py deleted file mode 100644 index 62607cc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/encryption.py +++ /dev/null @@ -1,542 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -from os import urandom -from json import ( - dumps, - loads, -) -from collections import OrderedDict - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from .._version import VERSION -from . import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. 
- ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, - key_wrapping_metadata): - ''' - :param bytes content_encryption_IV: - The content encryption initialization vector. - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. - ''' - - _validate_not_none('content_encryption_IV', content_encryption_IV) - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - self.content_encryption_IV = content_encryption_IV - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -def _generate_encryption_data_dict(kek, cek, iv): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param bytes iv: The initialization vector. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - wrapped_cek = kek.wrap_key(cek) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. - - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. 
- :rtype: _EncryptionData - ''' - try: - if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. - :rtype: bytes[] - ''' - - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: - raise ValueError('Encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. 
- content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintex. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext. - :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm: - raise ValueError('Specified encryption algorithm is not supported.') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key): - ''' - Encrypts the given blob using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. 
- encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key): - ''' - Generates the encryption_metadata for the blob. - - :param bytes key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, bytes, str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = urandom(32) - initialization_vector = urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob(require_encryption, key_encryption_key, key_resolver, - content, start_offset, end_offset, response_headers): - ''' - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether or not the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param key_resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted blob content. - :rtype: bytes - ''' - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256: - raise ValueError('Specified encryption algorithm is not supported.') - - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key): - ''' - Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - # Build the dictionary structure. 
- queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key usingthe string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message. - :rtype: str - ''' - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message. - if require_encryption: - raise ValueError('Message was not encrypted.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/models.py deleted file mode 100644 index 355f569..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/models.py +++ /dev/null @@ -1,481 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
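The encryption helpers above implement client-side envelope encryption: a random 32-byte content-encryption key (CEK) and 16-byte IV encrypt the payload with AES-CBC and PKCS7 padding, the CEK is wrapped by a user-supplied key-encryption key (KEK), and the wrapped key plus metadata travel alongside the ciphertext. The KEK is any object exposing ``wrap_key``, ``unwrap_key``, ``get_kid``, and ``get_key_wrap_algorithm``. A toy, deliberately insecure KEK sketch (a real one would delegate to Key Vault or an HSM), with hypothetical wiring through the queue-message helpers shown as comments:

.. code-block:: python

    import os

    class ToyKeyEncryptionKey:
        """Illustration only: XOR-'wraps' the 32-byte CEK with a locally held secret."""

        def __init__(self):
            self._secret = os.urandom(32)

        def wrap_key(self, key):
            return bytes(a ^ b for a, b in zip(key, self._secret))

        def unwrap_key(self, key, algorithm):
            # The algorithm string is echoed back from stored metadata; this toy ignores it.
            return bytes(a ^ b for a, b in zip(key, self._secret))

        def get_kid(self):
            return "local-toy-kek"

        def get_key_wrap_algorithm(self):
            return "XOR-32"

    # Hypothetical wiring through the internal helpers deleted above:
    # kek = ToyKeyEncryptionKey()
    # encrypted = encrypt_queue_message("hello", kek)
    # plaintext = decrypt_queue_message(encrypted, None, True, kek, None)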
-# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum): - - # Generic storage values - account_already_exists = "AccountAlreadyExists" - account_being_created = "AccountBeingCreated" - account_is_disabled = "AccountIsDisabled" - authentication_failed = "AuthenticationFailed" - authorization_failure = "AuthorizationFailure" - no_authentication_information = "NoAuthenticationInformation" - condition_headers_not_supported = "ConditionHeadersNotSupported" - condition_not_met = "ConditionNotMet" - empty_metadata_key = "EmptyMetadataKey" - insufficient_account_permissions = "InsufficientAccountPermissions" - internal_error = "InternalError" - invalid_authentication_info = "InvalidAuthenticationInfo" - invalid_header_value = "InvalidHeaderValue" - invalid_http_verb = "InvalidHttpVerb" - invalid_input = "InvalidInput" - invalid_md5 = "InvalidMd5" - invalid_metadata = "InvalidMetadata" - invalid_query_parameter_value = "InvalidQueryParameterValue" - invalid_range = "InvalidRange" - invalid_resource_name = "InvalidResourceName" - invalid_uri = "InvalidUri" - invalid_xml_document = "InvalidXmlDocument" - invalid_xml_node_value = "InvalidXmlNodeValue" - md5_mismatch = "Md5Mismatch" - metadata_too_large = "MetadataTooLarge" - missing_content_length_header = "MissingContentLengthHeader" - missing_required_query_parameter = "MissingRequiredQueryParameter" - missing_required_header = "MissingRequiredHeader" - missing_required_xml_node = "MissingRequiredXmlNode" - multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" - operation_timed_out = "OperationTimedOut" - out_of_range_input = "OutOfRangeInput" - out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" - request_body_too_large = "RequestBodyTooLarge" - resource_type_mismatch = "ResourceTypeMismatch" - request_url_failed_to_parse = "RequestUrlFailedToParse" - resource_already_exists = "ResourceAlreadyExists" - resource_not_found = "ResourceNotFound" - server_busy = "ServerBusy" - unsupported_header = "UnsupportedHeader" - unsupported_xml_node = "UnsupportedXmlNode" - unsupported_query_parameter = "UnsupportedQueryParameter" - unsupported_http_verb = "UnsupportedHttpVerb" - - # Blob values - append_position_condition_not_met = "AppendPositionConditionNotMet" - blob_already_exists = "BlobAlreadyExists" - blob_not_found = "BlobNotFound" - blob_overwritten = "BlobOverwritten" - blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" - block_count_exceeds_limit = "BlockCountExceedsLimit" - block_list_too_long = "BlockListTooLong" - cannot_change_to_lower_tier = "CannotChangeToLowerTier" - cannot_verify_copy_source = "CannotVerifyCopySource" - container_already_exists = "ContainerAlreadyExists" - container_being_deleted = "ContainerBeingDeleted" - container_disabled = "ContainerDisabled" - container_not_found = "ContainerNotFound" - content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" - copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" - copy_id_mismatch = "CopyIdMismatch" - feature_version_mismatch = "FeatureVersionMismatch" - incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" - incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" - incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" - infinite_lease_duration_required = "InfiniteLeaseDurationRequired" - invalid_blob_or_block = "InvalidBlobOrBlock" - invalid_blob_tier = "InvalidBlobTier" - invalid_blob_type = "InvalidBlobType" - invalid_block_id = "InvalidBlockId" - invalid_block_list = "InvalidBlockList" - invalid_operation = "InvalidOperation" - invalid_page_range = "InvalidPageRange" - invalid_source_blob_type = "InvalidSourceBlobType" - invalid_source_blob_url = "InvalidSourceBlobUrl" - invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" - lease_already_present = "LeaseAlreadyPresent" - lease_already_broken = "LeaseAlreadyBroken" - lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" - lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" - lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" - lease_id_missing = "LeaseIdMissing" - lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" - lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" - lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" - lease_lost = "LeaseLost" - lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" - lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" - lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" - max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" - no_pending_copy_operation = "NoPendingCopyOperation" - operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" - pending_copy_operation = "PendingCopyOperation" - previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" - previous_snapshot_not_found = "PreviousSnapshotNotFound" - previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" - sequence_number_condition_not_met = "SequenceNumberConditionNotMet" - sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" - snapshot_count_exceeded = "SnapshotCountExceeded" - snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" - snapshots_present = "SnapshotsPresent" - source_condition_not_met = "SourceConditionNotMet" - system_in_use = "SystemInUse" - target_condition_not_met = "TargetConditionNotMet" - unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" - blob_being_rehydrated = "BlobBeingRehydrated" - blob_archived = "BlobArchived" - blob_not_archived = "BlobNotArchived" - - # Queue values - invalid_marker = "InvalidMarker" - message_not_found = "MessageNotFound" - message_too_large = "MessageTooLarge" - pop_receipt_mismatch = "PopReceiptMismatch" - queue_already_exists = "QueueAlreadyExists" - queue_being_deleted = "QueueBeingDeleted" - queue_disabled = "QueueDisabled" - queue_not_empty = "QueueNotEmpty" - queue_not_found = "QueueNotFound" - - # File values - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - 
share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - content_length_must_be_zero = 'ContentLengthMustBeZero' - path_already_exists = 'PathAlreadyExists' - invalid_flush_position = 'InvalidFlushPosition' - invalid_property_name = 'InvalidPropertyName' - invalid_source_uri = 'InvalidSourceUri' - unsupported_rest_version = 'UnsupportedRestVersion' - file_system_not_found = 'FilesystemNotFound' - path_not_found = 'PathNotFound' - rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' - source_path_not_found = 'SourcePathNotFound' - destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' - file_system_already_exists = 'FilesystemAlreadyExists' - file_system_being_deleted = 'FilesystemBeingDeleted' - invalid_destination_path = 'InvalidDestinationPath' - invalid_rename_source_path = 'InvalidRenameSourcePath' - invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' - lease_is_already_broken = 'LeaseIsAlreadyBroken' - lease_name_mismatch = 'LeaseNameMismatch' - path_conflict = 'PathConflict' - source_path_is_being_deleted = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. 
- - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.fileshare.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. - Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. - :keyword bool permanent_delete: - To enable permanent delete on the blob is permitted. - Valid for Object resource type of Blob only. 
- """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.permanent_delete = kwargs.pop('permanent_delete', False) - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('y' if self.permanent_delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') + - ('i' if self.set_immutability_policy else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.fileshare.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_permanent_delete = 'y' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - p_set_immutability_policy = 'i' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy, - permanent_delete=p_permanent_delete) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. - - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. 
-        :return: A Services object
-        :rtype: ~azure.storage.fileshare.Services
-        """
-        res_blob = 'b' in string
-        res_queue = 'q' in string
-        res_file = 'f' in string
-
-        parsed = cls(res_blob, res_queue, res_file)
-        parsed._str = string  # pylint: disable = protected-access
-        return parsed
-
-
-class UserDelegationKey(object):
-    """
-    Represents a user delegation key, provided to the user by Azure Storage
-    based on their Azure Active Directory access token.
-
-    The fields are saved as simple strings since the user does not have to interact with this object;
-    to generate an identity SAS, the user can simply pass it to the right API.
-
-    :ivar str signed_oid:
-        Object ID of this token.
-    :ivar str signed_tid:
-        Tenant ID of the tenant that issued this token.
-    :ivar str signed_start:
-        The datetime this token becomes valid.
-    :ivar str signed_expiry:
-        The datetime this token expires.
-    :ivar str signed_service:
-        What service this key is valid for.
-    :ivar str signed_version:
-        The version identifier of the REST service that created this token.
-    :ivar str value:
-        The user delegation key.
-    """
-    def __init__(self):
-        self.signed_oid = None
-        self.signed_tid = None
-        self.signed_start = None
-        self.signed_expiry = None
-        self.signed_service = None
-        self.signed_version = None
-        self.value = None
diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/parser.py
deleted file mode 100644
index c6feba8..0000000
--- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/parser.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import sys
-
-if sys.version_info < (3,):
-    def _str(value):
-        if isinstance(value, unicode):  # pylint: disable=undefined-variable
-            return value.encode('utf-8')
-
-        return str(value)
-else:
-    _str = str
-
-
-def _to_utc_datetime(value):
-    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/policies.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/policies.py
deleted file mode 100644
index 1c77692..0000000
--- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/policies.py
+++ /dev/null
@@ -1,622 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-import base64
-import hashlib
-import re
-import random
-from time import time
-from io import SEEK_SET, UnsupportedOperation
-import logging
-import uuid
-from typing import Any, TYPE_CHECKING
-from wsgiref.handlers import format_date_time
-try:
-    from urllib.parse import (
-        urlparse,
-        parse_qsl,
-        urlunparse,
-        urlencode,
-    )
-except ImportError:
-    from urllib import urlencode  # type: ignore
-    from urlparse import (  # type: ignore
-        urlparse,
-        parse_qsl,
-        urlunparse,
-    )
-
-from azure.core.pipeline.policies import (
-    HeadersPolicy,
-    SansIOHTTPPolicy,
-    NetworkTraceLoggingPolicy,
-    HTTPPolicy,
-    RequestHistory
-)
-from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
-
-from .models import LocationMode
-
-try:
-    _unicode_type = unicode  # type: ignore
-except NameError:
-    _unicode_type = str
-
-if TYPE_CHECKING:
-    from azure.core.pipeline import PipelineRequest, PipelineResponse
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-def encode_base64(data):
-    if isinstance(data, _unicode_type):
-        data = data.encode('utf-8')
-    encoded = base64.b64encode(data)
-    return encoded.decode('utf-8')
-
-
-def is_exhausted(settings):
-    """Are we out of retries?"""
-    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
-    retry_counts = list(filter(None, retry_counts))
-    if not retry_counts:
-        return False
-    return min(retry_counts) < 0
-
-
-def retry_hook(settings, **kwargs):
-    if settings['hook']:
-        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
-
-
-def is_retry(response, mode):  # pylint: disable=too-many-return-statements
-    """Is this method/status code retryable? (Based on allowlists and control
-    variables such as the number of total retries to allow, whether to
-    respect the Retry-After header, whether this header is present, and
-    whether the returned status code is on the list of status codes to
-    be retried upon on the presence of the aforementioned header)
-    """
-    status = response.http_response.status_code
-    if 300 <= status < 500:
-        # An exception occurred, but in most cases it was expected. Examples could
-        # include a 409 Conflict or 412 Precondition Failed.
-        if status == 404 and mode == LocationMode.SECONDARY:
-            # Response code 404 should be retried if secondary was used.
-            return True
-        if status == 408:
-            # Response code 408 is a timeout and should be retried.
-            return True
-        return False
-    if status >= 500:
-        # Response codes above 500 with the exception of 501 Not Implemented and
-        # 505 Version Not Supported indicate a server issue and should be retried.
- if status in [501, 505]: - return False - return True - # retry if invalid content md5 - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = response.http_request.headers.get('content-md5', None) or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. " - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. 
- - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - def __init__(self, logging_enable=False, **kwargs): - self.logging_body = kwargs.pop("logging_body", False) - super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs) - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - self.logging_body = self.logging_body or options.pop("logging_body", False) - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - if self.logging_body: - _LOGGER.debug(str(http_request.body)) - else: - # We don't want to log the binary data of a file upload. - _LOGGER.debug("Hidden body, please use logging_body to show body") - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. 
- _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - resp_content_type = response.http_response.headers.get("content-type", "") - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif resp_content_type.endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif resp_content_type.startswith("image"): - _LOGGER.debug("Body contains image data.") - - if self.logging_body and resp_content_type.startswith("text"): - _LOGGER.debug(response.http_response.text()) - elif self.logging_body: - try: - _LOGGER.debug(response.http_response.body()) - except ValueError: - _LOGGER.debug("Body is streamable") - - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. 
- """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
-                settings['read'] -= 1
-                settings['history'].append(RequestHistory(request, error=error))
-
-        else:
-            # Incrementing because of a server error like a 500 in
-            # status_forcelist and the given method is in the allowlist
-            if response:
-                settings['status'] -= 1
-                settings['history'].append(RequestHistory(request, http_response=response))
-
-        if not is_exhausted(settings):
-            if request.method not in ['PUT'] and settings['retry_secondary']:
-                self._set_next_host_location(settings, request)
-
-            # rewind the request body if it is a stream
-            if request.body and hasattr(request.body, 'read'):
-                # no position was saved, then retry would not work
-                if settings['body_position'] is None:
-                    return False
-                try:
-                    # attempt to rewind the body to the initial position
-                    request.body.seek(settings['body_position'], SEEK_SET)
-                except (UnsupportedOperation, ValueError):
-                    # if body is not seekable, then retry would not work
-                    return False
-            settings['count'] += 1
-            return True
-        return False
-
-    def send(self, request):
-        retries_remaining = True
-        response = None
-        retry_settings = self.configure_retries(request)
-        while retries_remaining:
-            try:
-                response = self.next.send(request)
-                if is_retry(response, retry_settings['mode']):
-                    retries_remaining = self.increment(
-                        retry_settings,
-                        request=request.http_request,
-                        response=response.http_response)
-                    if retries_remaining:
-                        retry_hook(
-                            retry_settings,
-                            request=request.http_request,
-                            response=response.http_response,
-                            error=None)
-                        self.sleep(retry_settings, request.context.transport)
-                        continue
-                break
-            except AzureError as err:
-                retries_remaining = self.increment(
-                    retry_settings, request=request.http_request, error=err)
-                if retries_remaining:
-                    retry_hook(
-                        retry_settings,
-                        request=request.http_request,
-                        response=None,
-                        error=err)
-                    self.sleep(retry_settings, request.context.transport)
-                    continue
-                raise err
-        if retry_settings['history']:
-            response.context['history'] = retry_settings['history']
-        response.http_response.location_mode = retry_settings['mode']
-        return response
-
-
-class ExponentialRetry(StorageRetryPolicy):
-    """Exponential retry."""
-
-    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
-                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        '''
-        Constructs an Exponential retry object. The initial_backoff is used for
-        the first retry. Subsequent retries are retried after initial_backoff +
-        increment_base^retry_count seconds.
-
-        :param int initial_backoff:
-            The initial backoff interval, in seconds, for the first retry.
-        :param int increment_base:
-            The base, in seconds, to increment the initial_backoff by after the
-            first retry.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x varying between x+3 and x-3.
-        '''
-        self.initial_backoff = initial_backoff
-        self.increment_base = increment_base
-        self.random_jitter_range = random_jitter_range
-        super(ExponentialRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(StorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x varying between x+3 and x-3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/policies_async.py
deleted file mode 100644
index e0926b8..0000000
--- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/policies_async.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
-        :rtype: int or None
-        """
-        random_generator = random.Random()
-        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
-        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
-        random_range_end = backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
-
-
-class LinearRetry(AsyncStorageRetryPolicy):
-    """Linear retry."""
-
-    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
-        """
-        Constructs a Linear retry object.
-
-        :param int backoff:
-            The backoff interval, in seconds, between retries.
-        :param int retry_total:
-            The maximum number of retry attempts.
-        :param bool retry_to_secondary:
-            Whether the request should be retried to secondary, if able. This should
-            only be enabled if RA-GRS accounts are used and potentially stale data
-            can be handled.
-        :param int random_jitter_range:
-            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
-            For example, a random_jitter_range of 3 results in the back-off interval x varying between x+3 and x-3.
-        """
-        self.backoff = backoff
-        self.random_jitter_range = random_jitter_range
-        super(LinearRetry, self).__init__(
-            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
-
-    def get_backoff_time(self, settings):
-        """
-        Calculates how long to sleep before retrying.
-
-        :return:
-            An integer indicating how long to wait before retrying the request,
-            or None to indicate no retry should be performed.
-        :rtype: int or None
-        """
-        random_generator = random.Random()
-        # the backoff interval normally does not change, however there is the possibility
-        # that it was modified by accessing the property directly after initializing the object
-        random_range_start = self.backoff - self.random_jitter_range \
-            if self.backoff > self.random_jitter_range else 0
-        random_range_end = self.backoff + self.random_jitter_range
-        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/request_handlers.py
deleted file mode 100644
index 325825c..0000000
--- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/request_handlers.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-# --------------------------------------------------------------------------
-
-from typing import (  # pylint: disable=unused-import
-    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
-    TYPE_CHECKING
-)
-
-import logging
-from os import fstat
-from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
-
-import isodate
-
-from azure.core.exceptions import raise_with_traceback
-
-
-_LOGGER = logging.getLogger(__name__)
-
-_REQUEST_DELIMITER_PREFIX = "batch_"
-_HTTP1_1_IDENTIFIER = "HTTP/1.1"
-_HTTP_LINE_ENDING = "\r\n"
-
-
-def serialize_iso(attr):
-    """Serialize Datetime object into ISO-8601 formatted string.
-
-    :param Datetime attr: Object to be serialized.
-    :rtype: str
-    :raises: ValueError if format invalid.
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, OSError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
:
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/response_handlers.py deleted file mode 100644 index 0c375f8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/response_handlers.py +++ /dev/null @@ -1,196 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. 
- if isinstance(storage_error, (PartialBatchErrorException, - ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - try: - error_body = error_body or storage_error.response.reason - except AttributeError: - error_body = '' - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error - if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - exec("raise 
error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/shared_access_signature.py deleted file mode 100644 index bfab717..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/shared_access_signature.py +++ /dev/null @@ -1,222 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - 
QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - '\n' # Signed Encryption Scope - always empty for fileshare - ) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/uploads.py deleted file mode 100644 index 1b619df..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/uploads.py +++ /dev/null @@ -1,602 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
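The SharedAccessSignature factory and _SharedAccessHelper above assemble the account SAS query string returned by get_token(). A minimal, hypothetical usage sketch follows; the import path mirrors the pre-removal layout in this diff, and the account name and key are made up::

    import base64
    from datetime import datetime, timedelta

    # Illustrative import path for the shared_access_signature module shown in this hunk.
    from azure.multiapi.storagev2.fileshare.v2021_04_10._shared.shared_access_signature import (
        SharedAccessSignature,
    )

    # Any base64 string works for the sketch; a real storage account key is needed in practice.
    fake_key = base64.b64encode(b"not-a-real-storage-key").decode()

    sas = SharedAccessSignature("myaccount", fake_key)
    token = sas.generate_account(
        services="f",          # file service
        resource_types="sco",  # service, container, object
        permission="rl",       # read + list
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # token is a URL query string of the form 'se=...&sp=rl&ss=f&srt=sco&sv=...&sig=...'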
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) -from threading import Lock -from itertools import islice -from math import ceil - -import six - -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in 
islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def 
_upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
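As context for the seek-based windowing that follows, here is a minimal sketch of how a SubStream exposes a fixed slice of a larger seekable stream; it assumes only the class as defined in this hunk, and the import path is illustrative::

    from io import BytesIO
    from threading import Lock

    # Illustrative import path for the uploads module shown in this hunk.
    from azure.multiapi.storagev2.fileshare.v2021_04_10._shared.uploads import SubStream

    base = BytesIO(b"0123456789" * 100)  # 1000-byte source stream
    window = SubStream(base, stream_begin_index=200, length=100, lockObj=Lock())

    print(len(window))      # 100 - the window length, not the full stream length
    print(window.read(10))  # b'0123456789' - bytes 200..209 of the base stream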
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. 
- if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/uploads_async.py deleted file mode 100644 index 5ed192b..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared/uploads_async.py +++ /dev/null @@ -1,395 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
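The IterStreamer adapter at the end of this uploads module wraps a plain bytes generator in the minimal file-like read() interface the chunk uploaders consume. A small sketch, assuming only the class as defined above (the import path is illustrative)::

    # Illustrative import path for the uploads module shown in this hunk.
    from azure.multiapi.storagev2.fileshare.v2021_04_10._shared.uploads import IterStreamer

    def produce():
        yield b"hello "
        yield b"world"

    stream = IterStreamer(produce())
    print(stream.read(8))   # b'hello wo'
    print(stream.read(8))   # b'rld'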
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -from asyncio import Lock -from itertools import islice -import threading - -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .encryption import get_blob_encryptor_and_padder -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - encryption_options=None, - **kwargs): - - if encryption_options: - encryptor, padder = get_blob_encryptor_and_padder( - encryption_options.get('cek'), - encryption_options.get('vector'), - uploader_class is not PageBlobChunkUploader) - kwargs['encryptor'] = encryptor - kwargs['padder'] = padder - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - asyncio.ensure_future(uploader.process_chunk(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - **kwargs) - - if parallel: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await 
uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_start = stream.tell() if parallel else None - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def 
_upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - 
**self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared_access_signature.py deleted file mode 100644 index 234d1f9..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_shared_access_signature.py +++ /dev/null @@ -1,495 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, List, TYPE_CHECKING -) - -from ._shared import sign_string -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants -from ._shared.parser import _str - -if TYPE_CHECKING: - from datetime import datetime - from .import ( - ResourceTypes, - AccountSasPermissions, - ShareSasPermissions, - FileSasPermissions - ) - -class FileSharedAccessSignature(SharedAccessSignature): - ''' - Provides a factory for creating file and share access - signature tokens with a common account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. 
- ''' - super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) - - def generate_file(self, share_name, directory_name=None, file_name=None, - permission=None, expiry=None, start=None, policy_id=None, - ip=None, protocol=None, cache_control=None, - content_disposition=None, content_encoding=None, - content_language=None, content_type=None): - ''' - Generates a shared access signature for the file. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param str directory_name: - Name of directory. SAS tokens cannot be created for directories, so - this parameter should only be present if file_name is provided. - :param str file_name: - Name of file. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered rcwd. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. 
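A minimal, hypothetical call to the generate_file method documented above; the share, directory, file name, and key are made up, and the import path mirrors the pre-removal layout in this diff::

    import base64
    from datetime import datetime, timedelta

    # Illustrative import path for the module shown in this hunk.
    from azure.multiapi.storagev2.fileshare.v2021_04_10._shared_access_signature import (
        FileSharedAccessSignature,
    )

    fake_key = base64.b64encode(b"not-a-real-storage-key").decode()
    sas = FileSharedAccessSignature("myaccount", fake_key)
    token = sas.generate_file(
        share_name="myshare",
        directory_name="reports",
        file_name="2022-01.csv",
        permission="r",
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # Signs the canonicalized resource '/file/myaccount/myshare/reports/2022-01.csv'
    # with signed resource 'f', and returns the SAS query string.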
- ''' - resource_path = share_name - if directory_name is not None: - resource_path += '/' + _str(directory_name) if directory_name is not None else None - resource_path += '/' + _str(file_name) if file_name is not None else None - - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('f') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, resource_path) - - return sas.get_token() - - def generate_share(self, share_name, permission=None, expiry=None, - start=None, policy_id=None, ip=None, protocol=None, - cache_control=None, content_disposition=None, - content_encoding=None, content_language=None, - content_type=None): - ''' - Generates a shared access signature for the share. - Use the returned signature with the sas_token parameter of FileService. - - :param str share_name: - Name of share. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered rcwdl. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ShareSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - set_file_service_properties. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. - :param str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :param str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :param str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :param str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. 
- :param str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - ''' - sas = _FileSharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_id(policy_id) - sas.add_resource('s') - sas.add_override_response_headers(cache_control, content_disposition, - content_encoding, content_language, - content_type) - sas.add_resource_signature(self.account_name, self.account_key, share_name) - - return sas.get_token() - - -class _FileSharedAccessHelper(_SharedAccessHelper): - - def add_resource_signature(self, account_name, account_key, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/file/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. - string_to_sign = \ - (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - -def generate_account_sas( - account_name, # type: str - account_key, # type: str - resource_types, # type: Union[ResourceTypes, str] - permission, # type: Union[AccountSasPermissions, str] - expiry, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for the file service. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param ~azure.storage.fileshare.ResourceTypes resource_types: - Specifies the resource types that are accessible with the account SAS. - :param ~azure.storage.fileshare.AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. 
This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_authentication.py - :start-after: [START generate_sas_token] - :end-before: [END generate_sas_token] - :language: python - :dedent: 8 - :caption: Generate a sas token. - """ - sas = SharedAccessSignature(account_name, account_key) - return sas.generate_account( - services=Services(fileshare=True), - resource_types=resource_types, - permission=permission, - expiry=expiry, - start=start, - ip=ip, - **kwargs - ) # type: ignore - - -def generate_share_sas( - account_name, # type: str - share_name, # type: str - account_key, # type: str - permission=None, # type: Optional[Union[ShareSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): # type: (...) -> str - """Generates a shared access signature for a share. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered rcwdl. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or ShareSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. 
If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. To create a stored access policy, use - :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - return sas.generate_share( - share_name=share_name, - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) - - -def generate_file_sas( - account_name, # type: str - share_name, # type: str - file_path, # type: List[str] - account_key, # type: str - permission=None, # type: Optional[Union[FileSasPermissions, str]] - expiry=None, # type: Optional[Union[datetime, str]] - start=None, # type: Optional[Union[datetime, str]] - policy_id=None, # type: Optional[str] - ip=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> str - """Generates a shared access signature for a file. - - Use the returned signature with the credential parameter of any ShareServiceClient, - ShareClient, ShareDirectoryClient, or ShareFileClient. - - :param str account_name: - The storage account name used to generate the shared access signature. - :param str share_name: - The name of the share. - :param file_path: - The file path represented as a list of path segments, including the file name. - :type file_path: List[str] - :param str account_key: - The account key, also called shared key or access key, to generate the shared access signature. - :param permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Permissions must be ordered rcwd. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. - :type permission: str or FileSasPermissions - :param expiry: - The time at which the shared access signature becomes invalid. 
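For comparison, the share-level helper above maps onto the public entry point of the upstream ``azure-storage-file-share`` package. A minimal sketch using placeholder values (the account name, key, and share name are assumptions):

.. code-block:: python

    from datetime import datetime, timedelta

    from azure.storage.fileshare import ShareClient, ShareSasPermissions, generate_share_sas

    token = generate_share_sas(
        account_name="myaccount",
        share_name="myshare",
        account_key="<account-key>",
        permission=ShareSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )

    # The token is passed as the credential of any share-scoped client.
    share = ShareClient(
        account_url="https://myaccount.file.core.windows.net",
        share_name="myshare",
        credential=token,
    )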
- Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: ~datetime.datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: ~datetime.datetime or str - :param str policy_id: - A unique value up to 64 characters in length that correlates to a - stored access policy. - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :keyword str cache_control: - Response header value for Cache-Control when resource is accessed - using this shared access signature. - :keyword str content_disposition: - Response header value for Content-Disposition when resource is accessed - using this shared access signature. - :keyword str content_encoding: - Response header value for Content-Encoding when resource is accessed - using this shared access signature. - :keyword str content_language: - Response header value for Content-Language when resource is accessed - using this shared access signature. - :keyword str content_type: - Response header value for Content-Type when resource is accessed - using this shared access signature. - :keyword str protocol: - Specifies the protocol permitted for a request made. The default value is https. - :return: A Shared Access Signature (sas) token. - :rtype: str - """ - sas = FileSharedAccessSignature(account_name, account_key) - if len(file_path) > 1: - dir_path = '/'.join(file_path[:-1]) - else: - dir_path = None # type: ignore - return sas.generate_file( # type: ignore - share_name=share_name, - directory_name=dir_path, - file_name=file_path[-1], - permission=permission, - expiry=expiry, - start=start, - policy_id=policy_id, - ip=ip, - **kwargs - ) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/_version.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/_version.py deleted file mode 100644 index 23e2a0e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -VERSION = "12.7.0" diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/__init__.py deleted file mode 100644 index 73393b8..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from ._file_client_async import ShareFileClient -from ._directory_client_async import ShareDirectoryClient -from ._share_client_async import ShareClient -from ._share_service_client_async import ShareServiceClient -from ._lease_async import ShareLeaseClient - - -__all__ = [ - 'ShareFileClient', - 'ShareDirectoryClient', - 'ShareClient', - 'ShareServiceClient', - 'ShareLeaseClient', -] diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_directory_client_async.py deleted file mode 100644 index 3a24224..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_directory_client_async.py +++ /dev/null @@ -1,718 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -import time -from typing import ( # pylint: disable=unused-import - Optional, Union, Any, Dict, TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError, ResourceNotFoundError -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _get_file_permission, _datetime_to_str -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.policies_async import ExponentialRetry -from .._shared.request_handlers import add_metadata_headers -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_directory_properties -from .._serialize import get_api_version, get_dest_access_conditions, get_rename_smb_properties -from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase -from ._file_client_async import ShareFileClient -from ._models import DirectoryPropertiesPaged, HandlesPaged - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes - from .._generated.models import HandleItem - - -class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): - """A client to interact with a specific directory, although it may not yet exist. - - For operations relating to a specific subdirectory or file in this share, the clients for those - entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the directory, - use the :func:`from_directory_url` classmethod. - :param share_name: - The name of the share for the directory. - :type share_name: str - :param str directory_path: - The directory path for the directory with which to interact. - If specified, this value will override a directory value specified in the directory URL. 
- :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - directory_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareDirectoryClient, self).__init__( - account_url, - share_name=share_name, - directory_path=directory_path, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._loop = loop - - def get_file_client(self, file_name, **kwargs): - # type: (str, Any) -> ShareFileClient - """Get a client to interact with a specific file. - - The file need not already exist. - - :param str file_name: - The name of the file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if self.directory_path: - file_name = self.directory_path.rstrip('/') + "/" + file_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareFileClient( - self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - def get_subdirectory_client(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Get a client to interact with a specific subdirectory. - - The subdirectory need not already exist. - - :param str directory_name: - The name of the subdirectory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START get_subdirectory_client] - :end-before: [END get_subdirectory_client] - :language: python - :dedent: 16 - :caption: Gets the subdirectory client. 
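A minimal sketch of how the subdirectory client described above is typically obtained and used, assuming the upstream ``azure.storage.fileshare.aio`` import path and placeholder account details:

.. code-block:: python

    import asyncio

    from azure.storage.fileshare.aio import ShareDirectoryClient

    async def main():
        parent = ShareDirectoryClient(
            account_url="https://myaccount.file.core.windows.net",
            share_name="myshare",
            directory_path="parent",
            credential="<sas-token>",
        )
        async with parent:
            # The subdirectory client is created locally; the directory itself
            # is only created on the service by the explicit call below.
            child = parent.get_subdirectory_client("child")
            await child.create_directory()

    asyncio.run(main())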
- """ - directory_path = self.directory_path.rstrip('/') + "/" + directory_name - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) - - @distributed_trace_async - async def create_directory(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new directory under the directory referenced by the client. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_directory] - :end-before: [END create_directory] - :language: python - :dedent: 16 - :caption: Creates a directory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.directory.create( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_directory(self, **kwargs): - # type: (**Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_directory] - :end-before: [END delete_directory] - :language: python - :dedent: 16 - :caption: Deletes a directory. - """ - timeout = kwargs.pop('timeout', None) - try: - await self._client.directory.delete(timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def rename_directory( - self, new_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> ShareDirectoryClient - """ - Rename the source directory. - - :param str new_name: - The new directory name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool overwrite: - A boolean value for if the destination file already exists, whether this request will - overwrite the file or not. If true, the rename will succeed and will overwrite the - destination file. If not provided or if false and the destination file does exist, the - request will not overwrite the destination file. If provided and the destination file - doesn't exist, the rename will succeed. - :keyword bool ignore_read_only: - A boolean value that specifies whether the ReadOnly attribute on a preexisting destination - file should be respected. If true, the rename will succeed, otherwise, a previous file at the - destination with the ReadOnly attribute set will cause the rename to fail. 
- :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory. This header - can be used if Permission size is <= 8KB, else file_permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. - A value of 'preserve' can be passed to preserve source permissions. - Note: Only one of the file_permission or file_permission_key should be specified. - :keyword str file_permission_key: - Key of the permission to be set for the directory. - Note: Only one of the file-permission or file-permission-key should be specified. - :keyword file_attributes: - The file system attributes for the directory. - :paramtype file_attributes:~azure.storage.fileshare.NTFSAttributes or str - :keyword file_creation_time: - Creation time for the directory. - :paramtype file_creation_time:~datetime.datetime or str - :keyword file_last_write_time: - Last write time for the file. - :paramtype file_last_write_time:~datetime.datetime or str - :keyword Dict[str,str] metadata: - A name-value pair to associate with a file storage object. - :keyword destination_lease: - Required if the destination file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str - :returns: The new Directory Client. - :rtype: ~azure.storage.fileshare.ShareDirectoryClient - """ - if not new_name: - raise ValueError("Please specify a new directory name.") - - new_name = new_name.strip('/') - new_path_and_query = new_name.split('?') - new_dir_path = new_path_and_query[0] - if len(new_path_and_query) == 2: - new_dir_sas = new_path_and_query[1] or self._query_str.strip('?') - else: - new_dir_sas = self._query_str.strip('?') - - new_directory_client = ShareDirectoryClient( - '{}://{}'.format(self.scheme, self.primary_hostname), self.share_name, new_dir_path, - credential=new_dir_sas or self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function - ) - - kwargs.update(get_rename_smb_properties(kwargs)) - - timeout = kwargs.pop('timeout', None) - overwrite = kwargs.pop('overwrite', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - - destination_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None)) - - try: - await new_directory_client._client.directory.rename( # pylint: disable=protected-access - self.url, - timeout=timeout, - replace_if_exists=overwrite, - destination_lease_access_conditions=destination_access_conditions, - headers=headers, - **kwargs) - - return new_directory_client - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files(self, name_starts_with=None, **kwargs): - # type: (Optional[str], Any) -> AsyncItemPaged - """Lists all the directories and files under the directory. - - :param str name_starts_with: - Filters the results to return only entities whose names - begin with the specified prefix. - :keyword list[str] include: - Include this parameter to specify one or more datasets to include in the response. - Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". - - .. 
versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword bool include_extended_info: - If this is set to true, file id will be returned in listed results. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START lists_directory] - :end-before: [END lists_directory] - :language: python - :dedent: 16 - :caption: List directories and files. - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_files_and_directories_segment, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=DirectoryPropertiesPaged) - - @distributed_trace - def list_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> AsyncItemPaged - """Lists opened handles on a directory or a file under the directory. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.directory.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - recursive=recursive, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def exists(self, **kwargs): - # type: (**Any) -> bool - """ - Returns True if a directory exists and returns False otherwise. - - :kwarg int timeout: - The timeout parameter is expressed in seconds. - :returns: True if the directory exists, False otherwise. - :rtype: bool - """ - try: - await self._client.directory.get_properties(**kwargs) - return True - except HttpResponseError as error: - try: - process_storage_error(error) - except ResourceNotFoundError: - return False - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. - :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. 
Use 'close_all_handles' instead.") - try: - response = await self._client.directory.force_close_handles( - handle_id, - marker=None, - recursive=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, recursive=False, **kwargs): - # type: (bool, Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :param bool recursive: - Boolean that specifies if operation should apply to the directory specified by the client, - its files, its subdirectories and their files. Default value is False. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.directory.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - recursive=recursive, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } - - @distributed_trace_async - async def get_directory_properties(self, **kwargs): - # type: (Any) -> DirectoryProperties - """Returns all user-defined metadata and system properties for the - specified directory. The data returned does not include the directory's - list of files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: DirectoryProperties - :rtype: ~azure.storage.fileshare.DirectoryProperties - """ - timeout = kwargs.pop('timeout', None) - try: - response = await self._client.directory.get_properties( - timeout=timeout, - cls=deserialize_directory_properties, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response # type: ignore - - @distributed_trace_async - async def set_directory_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the directory. - - Each call to this operation replaces all existing metadata - attached to the directory. To remove all metadata from the directory, - call this operation with an empty metadata dict. - - :param metadata: - Name-value pairs associated with the directory as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Directory-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.directory.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the directory. - - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - timeout = kwargs.pop('timeout', None) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.directory.set_properties( # type: ignore - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - timeout=timeout, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> ShareDirectoryClient - """Creates a new subdirectory and returns a client to interact - with the subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the subdirectory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START create_subdirectory] - :end-before: [END create_subdirectory] - :language: python - :dedent: 16 - :caption: Create a subdirectory. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) - return subdir # type: ignore - - @distributed_trace_async - async def delete_subdirectory( - self, directory_name, # type: str - **kwargs - ): - # type: (...) -> None - """Deletes a subdirectory. - - :param str directory_name: - The name of the subdirectory. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_subdirectory] - :end-before: [END delete_subdirectory] - :language: python - :dedent: 16 - :caption: Delete a subdirectory. - """ - timeout = kwargs.pop('timeout', None) - subdir = self.get_subdirectory_client(directory_name) - await subdir.delete_directory(timeout=timeout, **kwargs) - - @distributed_trace_async - async def upload_file( - self, file_name, # type: str - data, # type: Any - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """Creates a new file in the directory and returns a ShareFileClient - to interact with the file. - - :param str file_name: - The name of the file. - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: ShareFileClient - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START upload_file_to_directory] - :end-before: [END upload_file_to_directory] - :language: python - :dedent: 16 - :caption: Upload a file to a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.upload_file( - data, - length=length, - **kwargs) - return file_client # type: ignore - - @distributed_trace_async - async def delete_file( - self, file_name, # type: str - **kwargs # type: Optional[Any] - ): - # type: (...) -> None - """Marks the specified file for deletion. The file is later - deleted during garbage collection. - - :param str file_name: - The name of the file to delete. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_directory_async.py - :start-after: [START delete_file_in_directory] - :end-before: [END delete_file_in_directory] - :language: python - :dedent: 16 - :caption: Delete a file in a directory. - """ - file_client = self.get_file_client(file_name) - await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_download_async.py deleted file mode 100644 index 971f12e..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_download_async.py +++ /dev/null @@ -1,492 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import asyncio -import sys -from io import BytesIO -from itertools import islice -import warnings - -from typing import AsyncIterator -from azure.core.exceptions import HttpResponseError, ResourceModifiedError -from .._shared.encryption import decrypt_blob -from .._shared.request_handlers import validate_and_format_range_headers -from .._shared.response_handlers import process_storage_error, parse_length_from_content_range -from .._download import process_range_and_offset, _ChunkDownloader - - -async def process_content(data, start_offset, end_offset, encryption): - if data is None: - raise ValueError("Response cannot be None.") - try: - content = data.response.body() - except Exception as error: - raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) - if encryption.get('key') is not None or encryption.get('resolver') is not None: - try: - return decrypt_blob( - encryption.get('required'), - encryption.get('key'), - encryption.get('resolver'), - content, - start_offset, - end_offset, - data.response.headers) - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=data.response, - error=error) - return content - - -class _AsyncChunkDownloader(_ChunkDownloader): - def __init__(self, **kwargs): - super(_AsyncChunkDownloader, self).__init__(**kwargs) - self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None - self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None - - async def process_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) - length = chunk_end - chunk_start - if length > 0: - await self._write_to_stream(chunk_data, chunk_start) - await self._update_progress(length) - - async def yield_chunk(self, chunk_start): - chunk_start, chunk_end = self._calculate_range(chunk_start) - return await self._download_chunk(chunk_start, chunk_end - 1) - - async def _update_progress(self, length): - if self.progress_lock: - async with self.progress_lock: # pylint: disable=not-async-context-manager - self.progress_total += length - else: - self.progress_total += length - - async def _write_to_stream(self, chunk_data, chunk_start): - if self.stream_lock: - async with self.stream_lock: # pylint: disable=not-async-context-manager - self.stream.seek(self.stream_start + (chunk_start - self.start_index)) - self.stream.write(chunk_data) - else: - self.stream.write(chunk_data) - - async def 
_download_chunk(self, chunk_start, chunk_end): - download_range, offset = process_range_and_offset( - chunk_start, chunk_end, chunk_end, self.encryption_options - ) - range_header, range_validation = validate_and_format_range_headers( - download_range[0], - download_range[1], - check_content_md5=self.validate_content - ) - try: - _, response = await self.client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self.validate_content, - data_stream_total=self.total_size, - download_stream_current=self.progress_total, - **self.request_options - ) - if response.properties.etag != self.etag: - raise ResourceModifiedError(message="The file has been modified while downloading.") - except HttpResponseError as error: - process_storage_error(error) - - chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) - return chunk_data - - -class _AsyncChunkIterator(object): - """Async iterator for chunks in blob download stream.""" - - def __init__(self, size, content, downloader, chunk_size): - self.size = size - self._chunk_size = chunk_size - self._current_content = content - self._iter_downloader = downloader - self._iter_chunks = None - self._complete = (size == 0) - - def __len__(self): - return self.size - - def __iter__(self): - raise TypeError("Async stream must be iterated asynchronously.") - - def __aiter__(self): - return self - - async def __anext__(self): - """Iterate through responses.""" - if self._complete: - raise StopAsyncIteration("Download complete") - if not self._iter_downloader: - # cut the data obtained from initial GET into chunks - if len(self._current_content) > self._chunk_size: - return self._get_chunk_data() - self._complete = True - return self._current_content - - if not self._iter_chunks: - self._iter_chunks = self._iter_downloader.get_chunk_offsets() - - # initial GET result still has more than _chunk_size bytes of data - if len(self._current_content) >= self._chunk_size: - return self._get_chunk_data() - - try: - chunk = next(self._iter_chunks) - self._current_content += await self._iter_downloader.yield_chunk(chunk) - except StopIteration: - self._complete = True - # it's likely that there some data left in self._current_content - if self._current_content: - return self._current_content - raise StopAsyncIteration("Download complete") - - return self._get_chunk_data() - - def _get_chunk_data(self): - chunk_data = self._current_content[: self._chunk_size] - self._current_content = self._current_content[self._chunk_size:] - return chunk_data - - -class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes - """A streaming object to download from Azure Storage. - - :ivar str name: - The name of the file being downloaded. - :ivar: str path: - The full path of the file. - :ivar str share: - The name of the share where the file is. - :ivar ~azure.storage.fileshare.FileProperties properties: - The properties of the file being downloaded. If only a range of the data is being - downloaded, this will be reflected in the properties. - :ivar int size: - The size of the total data in the stream. This will be the byte range if speficied, - otherwise the total size of the file. 
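Instances of this downloader are normally returned by ``ShareFileClient.download_file`` rather than constructed directly. A minimal streaming sketch, assuming the upstream async package and placeholder account details:

.. code-block:: python

    import asyncio

    from azure.storage.fileshare.aio import ShareFileClient

    async def main():
        file_client = ShareFileClient(
            account_url="https://myaccount.file.core.windows.net",
            share_name="myshare",
            file_path="logs/app.log",
            credential="<sas-token>",
        )
        async with file_client:
            downloader = await file_client.download_file()

            # chunks() yields the content incrementally instead of buffering
            # the whole file, as readall() would.
            async for chunk in downloader.chunks():
                print(len(chunk))

    asyncio.run(main())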
- """ - - def __init__( - self, - client=None, - config=None, - start_range=None, - end_range=None, - validate_content=None, - encryption_options=None, - max_concurrency=1, - name=None, - path=None, - share=None, - encoding=None, - **kwargs - ): - self.name = name - self.path = path - self.share = share - self.properties = None - self.size = None - - self._client = client - self._config = config - self._start_range = start_range - self._end_range = end_range - self._max_concurrency = max_concurrency - self._encoding = encoding - self._validate_content = validate_content - self._encryption_options = encryption_options or {} - self._request_options = kwargs - self._location_mode = None - self._download_complete = False - self._current_content = None - self._file_size = None - self._response = None - self._etag = None - - # The service only provides transactional MD5s for chunks under 4MB. - # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first - # chunk so a transactional MD5 can be retrieved. - self._first_get_size = self._config.max_single_get_size if not self._validate_content \ - else self._config.max_chunk_get_size - initial_request_start = self._start_range if self._start_range is not None else 0 - if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: - initial_request_end = self._end_range - else: - initial_request_end = initial_request_start + self._first_get_size - 1 - - self._initial_range, self._initial_offset = process_range_and_offset( - initial_request_start, initial_request_end, self._end_range, self._encryption_options - ) - - def __len__(self): - return self.size - - async def _setup(self): - self._response = await self._initial_request() - self.properties = self._response.properties - self.properties.name = self.name - self.properties.path = self.path - self.properties.share = self.share - - # Set the content length to the download size instead of the size of - # the last range - self.properties.size = self.size - - # Overwrite the content range to the user requested range - self.properties.content_range = 'bytes {0}-{1}/{2}'.format( - self._start_range, - self._end_range, - self._file_size - ) - - # Overwrite the content MD5 as it is the MD5 for the last range instead - # of the stored MD5 - # TODO: Set to the stored MD5 when the service returns this - self.properties.content_md5 = None - - if self.size == 0: - self._current_content = b"" - else: - self._current_content = await process_content( - self._response, - self._initial_offset[0], - self._initial_offset[1], - self._encryption_options - ) - - async def _initial_request(self): - range_header, range_validation = validate_and_format_range_headers( - self._initial_range[0], - self._initial_range[1], - start_range_required=False, - end_range_required=False, - check_content_md5=self._validate_content) - - try: - location_mode, response = await self._client.download( - range=range_header, - range_get_content_md5=range_validation, - validate_content=self._validate_content, - data_stream_total=None, - download_stream_current=0, - **self._request_options) - - # Check the location we read from to ensure we use the same one - # for subsequent requests. 
- self._location_mode = location_mode - - # Parse the total file size and adjust the download size if ranges - # were specified - self._file_size = parse_length_from_content_range(response.properties.content_range) - if self._end_range is not None: - # Use the length unless it is over the end of the file - self.size = min(self._file_size, self._end_range - self._start_range + 1) - elif self._start_range is not None: - self.size = self._file_size - self._start_range - else: - self.size = self._file_size - - except HttpResponseError as error: - if self._start_range is None and error.response.status_code == 416: - # Get range will fail on an empty file. If the user did not - # request a range, do a regular get request in order to get - # any properties. - try: - _, response = await self._client.download( - validate_content=self._validate_content, - data_stream_total=0, - download_stream_current=0, - **self._request_options) - except HttpResponseError as error: - process_storage_error(error) - - # Set the download size to empty - self.size = 0 - self._file_size = 0 - else: - process_storage_error(error) - - # If the file is small, the download is complete at this point. - # If file size is large, download the rest of the file in chunks. - if response.properties.size == self.size: - self._download_complete = True - self._etag = response.properties.etag - return response - - def chunks(self): - # type: () -> AsyncIterator[bytes] - """Iterate over chunks in the download stream. - - :rtype: AsyncIterator[bytes] - """ - if self.size == 0 or self._download_complete: - iter_downloader = None - else: - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - iter_downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # Start where the first download ended - end_range=data_end, - stream=None, - parallel=False, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - etag=self._etag, - **self._request_options) - return _AsyncChunkIterator( - size=self.size, - content=self._current_content, - downloader=iter_downloader, - chunk_size=self._config.max_chunk_get_size - ) - - async def readall(self): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - :rtype: bytes or str - """ - stream = BytesIO() - await self.readinto(stream) - data = stream.getvalue() - if self._encoding: - return data.decode(self._encoding) - return data - - async def content_as_bytes(self, max_concurrency=1): - """Download the contents of this file. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :rtype: bytes - """ - warnings.warn( - "content_as_bytes is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - return await self.readall() - - async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): - """Download the contents of this file, and decode as text. - - This operation is blocking until all data is downloaded. - - :keyword int max_concurrency: - The number of parallel connections with which to download. - :param str encoding: - Test encoding to decode the downloaded bytes. 
Default is UTF-8. - :rtype: str - """ - warnings.warn( - "content_as_text is deprecated, use readall instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - self._encoding = encoding - return await self.readall() - - async def readinto(self, stream): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The number of bytes read. - :rtype: int - """ - # the stream must be seekable if parallel download is required - parallel = self._max_concurrency > 1 - if parallel: - error_message = "Target stream handle must be seekable." - if sys.version_info >= (3,) and not stream.seekable(): - raise ValueError(error_message) - - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(error_message) - - # Write the content to the user stream - stream.write(self._current_content) - if self._download_complete: - return self.size - - data_end = self._file_size - if self._end_range is not None: - # Use the length unless it is over the end of the file - data_end = min(self._file_size, self._end_range + 1) - - downloader = _AsyncChunkDownloader( - client=self._client, - total_size=self.size, - chunk_size=self._config.max_chunk_get_size, - current_progress=self._first_get_size, - start_range=self._initial_range[1] + 1, # start where the first download ended - end_range=data_end, - stream=stream, - parallel=parallel, - validate_content=self._validate_content, - encryption_options=self._encryption_options, - use_location=self._location_mode, - etag=self._etag, - **self._request_options) - - dl_tasks = downloader.get_chunk_offsets() - running_futures = [ - asyncio.ensure_future(downloader.process_chunk(d)) - for d in islice(dl_tasks, 0, self._max_concurrency) - ] - while running_futures: - # Wait for some download to finish before adding a new one - _done, running_futures = await asyncio.wait( - running_futures, return_when=asyncio.FIRST_COMPLETED) - try: - next_chunk = next(dl_tasks) - except StopIteration: - break - else: - running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) - - if running_futures: - # Wait for the remaining downloads to finish - await asyncio.wait(running_futures) - return self.size - - async def download_to_stream(self, stream, max_concurrency=1): - """Download the contents of this file to a stream. - - :param stream: - The stream to download to. This can be an open file-handle, - or any writable stream. The stream must be seekable if the download - uses more than one parallel connection. - :returns: The properties of the downloaded file. - :rtype: Any - """ - warnings.warn( - "download_to_stream is deprecated, use readinto instead", - DeprecationWarning - ) - self._max_concurrency = max_concurrency - await self.readinto(stream) - return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_file_client_async.py deleted file mode 100644 index c545143..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_file_client_async.py +++ /dev/null @@ -1,1309 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-lines, invalid-overridden-method, too-many-public-methods -import functools -import time -from io import BytesIO -from typing import Optional, Union, IO, List, Tuple, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import - -import six -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError - -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from .._parser import _datetime_to_str, _get_file_permission -from .._shared.parser import _str - -from .._generated.aio import AzureFileStorage -from .._generated.models import FileHTTPHeaders -from .._shared.policies_async import ExponentialRetry -from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer -from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.request_handlers import add_metadata_headers, get_length -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result -from .._serialize import ( - get_access_conditions, - get_api_version, - get_dest_access_conditions, - get_rename_smb_properties, - get_smb_properties, - get_source_access_conditions) -from .._file_client import ShareFileClient as ShareFileClientBase -from ._models import HandlesPaged -from ._lease_async import ShareLeaseClient -from ._download_async import StorageStreamDownloader - -if TYPE_CHECKING: - from datetime import datetime - from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes - from .._generated.models import HandleItem - - -async def _upload_file_helper( - client, - stream, - size, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - file_settings, - file_attributes="none", - file_creation_time="now", - file_last_write_time="now", - file_permission=None, - file_permission_key=None, - **kwargs -): - try: - if size is None or size < 0: - raise ValueError("A content size must be specified for a File.") - response = await client.create_file( - size, content_settings=content_settings, metadata=metadata, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - permission_key=file_permission_key, - timeout=timeout, - **kwargs - ) - if size == 0: - return response - - responses = await upload_data_chunks( - service=client, - uploader_class=FileChunkUploader, - total_size=size, - chunk_size=file_settings.max_range_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - timeout=timeout, - **kwargs - ) - return sorted(responses, key=lambda r: r.get('last_modified'))[-1] - except HttpResponseError as error: - process_storage_error(error) - - -class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): - """A client to interact with a specific file, although that file may not yet exist. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the - file, use the :func:`from_file_url` classmethod. - :param share_name: - The name of the share for the file. 
- :type share_name: str - :param str file_path: - The file path to the file with which to interact. If specified, this value will override - a file value specified in the file URL. - :param str snapshot: - An optional file snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`ShareClient.create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - - def __init__( # type: ignore - self, - account_url, # type: str - share_name, # type: str - file_path, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareFileClient, self).__init__( - account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, - credential=credential, loop=loop, **kwargs - ) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def acquire_lease(self, lease_id=None, **kwargs): - # type: (Optional[str], **Any) -> ShareLeaseClient - """Requests a new lease. - - If the file does not have an active lease, the File - Service creates a lease on the blob and returns a new lease. - - :param str lease_id: - Proposed lease ID, in a GUID string format. The File Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/blob_samples_common.py - :start-after: [START acquire_lease_on_blob] - :end-before: [END acquire_lease_on_blob] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a blob. - """ - kwargs['lease_duration'] = -1 - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(**kwargs) - return lease - - @distributed_trace_async - async def create_file( # type: ignore - self, - size, # type: int - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Creates a new file. - - Note that it only initializes the file with no content. - - :param int size: Specifies the maximum size for the file, - up to 1 TB. 
- :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START create_file] - :end-before: [END create_file] - :language: python - :dedent: 16 - :caption: Create a file. 
- """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - content_settings = kwargs.pop('content_settings', None) - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - if self.require_encryption and not self.key_encryption_key: - raise ValueError("Encryption required but no key was provided.") - - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - file_http_headers = None - if content_settings: - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') - try: - return await self._client.file.create( # type: ignore - file_content_length=size, - metadata=metadata, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - file_http_headers=file_http_headers, - lease_access_conditions=access_conditions, - headers=headers, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_file( - self, data, # type: Any - length=None, # type: Optional[int] - file_attributes="none", # type: Union[str, NTFSAttributes] - file_creation_time="now", # type: Union[str, datetime] - file_last_write_time="now", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Uploads a new file. - - :param Any data: - Content of the file. - :param int length: - Length of the file in bytes. Specify its maximum size, up to 1 TiB. - :param file_attributes: - The file system attributes for files and directories. - If not set, the default value would be "None" and the attributes will be set to "Archive". - Here is an example for when the var type is str: 'Temporary|Archive'. - file_attributes value is not case sensitive. - :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes - :param file_creation_time: Creation time for the file - Default value: Now. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Now. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. 
- :type permission_key: str - :keyword dict(str,str) metadata: - Name-value pairs associated with the file as metadata. - :keyword ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :keyword bool validate_content: - If true, calculates an MD5 hash for each range of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword str encoding: - Defaults to UTF-8. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START upload_file] - :end-before: [END upload_file] - :language: python - :dedent: 16 - :caption: Upload a file. - """ - metadata = kwargs.pop('metadata', None) - content_settings = kwargs.pop('content_settings', None) - max_concurrency = kwargs.pop('max_concurrency', 1) - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - - if isinstance(data, six.text_type): - data = data.encode(encoding) - if length is None: - length = get_length(data) - if isinstance(data, bytes): - data = data[:length] - - if isinstance(data, bytes): - stream = BytesIO(data) - elif hasattr(data, "read"): - stream = data - elif hasattr(data, "__iter__"): - stream = IterStreamer(data, encoding=encoding) # type: ignore - else: - raise TypeError("Unsupported data type: {}".format(type(data))) - return await _upload_file_helper( # type: ignore - self, - stream, - length, - metadata, - content_settings, - validate_content, - timeout, - max_concurrency, - self._config, - file_attributes=file_attributes, - file_creation_time=file_creation_time, - file_last_write_time=file_last_write_time, - file_permission=file_permission, - file_permission_key=permission_key, - **kwargs - ) - - @distributed_trace_async - async def start_copy_from_url(self, source_url, **kwargs): - # type: (str, Any) -> Any - """Initiates the copying of data from a source URL into the file - referenced by the client. - - The status of this copy operation can be found using the `get_properties` - method. - - :param str source_url: - Specifies the URL of the source file. - :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. 
This setting can be - used if Permission size is <= 8KB, otherwise permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword str permission_key: - Key of the permission to be set for the directory/file. - This value can be set to "source" to copy the security descriptor from the source file. - Otherwise if set, this value will be used to override the source value. If not set, permission value - is inherited from the parent directory of the target file. - Note: Only one of the file_permission or permission_key should be specified. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword file_attributes: - This value can be set to "source" to copy file attributes from the source file to the target file, - or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes - to set on the target file. If this is not set, the default value is "Archive". - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :keyword file_creation_time: - This value can be set to "source" to copy the creation time from the source file to the target file, - or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. - If this is not set, creation time will be set to the date time value of the creation - (or when it was overwritten) of the target file by copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_creation_time: str or ~datetime.datetime - :keyword file_last_write_time: - This value can be set to "source" to copy the last write time from the source file to the target file, or - a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. - If this is not set, value will be the last write time to the file by the copy engine. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :paramtype file_last_write_time: str or ~datetime.datetime - :keyword bool ignore_read_only: - Specifies the option to overwrite the target file if it already exists and has read-only attribute set. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword bool set_archive_attribute: - Specifies the option to set the archive attribute on the target file. - True means the archive attribute will be set on the target file despite attribute - overrides or the source file state. - - .. versionadded:: 12.1.0 - This parameter was introduced in API version '2019-07-07'. - - :keyword metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START copy_file_from_url] - :end-before: [END copy_file_from_url] - :language: python - :dedent: 16 - :caption: Copy a file from a URL - """ - metadata = kwargs.pop('metadata', None) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) - kwargs.update(get_smb_properties(kwargs)) - try: - return await self._client.file.start_copy( - source_url, - metadata=metadata, - lease_access_conditions=access_conditions, - headers=headers, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def abort_copy(self, copy_id, **kwargs): - # type: (Union[str, FileProperties], Any) -> None - """Abort an ongoing copy operation. - - This will leave a destination file with zero length and full metadata. - This will raise an error if the copy operation has already ended. - - :param copy_id: - The copy operation to abort. This can be either an ID, or an - instance of FileProperties. - :type copy_id: str or ~azure.storage.fileshare.FileProperties - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - copy_id = copy_id.copy.id - except AttributeError: - try: - copy_id = copy_id["copy_id"] - except TypeError: - pass - try: - await self._client.file.abort_copy(copy_id=copy_id, - lease_access_conditions=access_conditions, - timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def download_file( - self, - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> StorageStreamDownloader - """Downloads a file to the StorageStreamDownloader. The readall() method must - be used to read all the content or readinto() must be used to download the file into - a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. - - :param int offset: - Start of byte range to use for downloading a section of the file. - Must be set if length is provided. - :param int length: - Number of bytes to read from the stream. This is optional, but - should be supplied for optimal performance. - :keyword int max_concurrency: - Maximum number of parallel connections to use. - :keyword bool validate_content: - If true, calculates an MD5 hash for each chunk of the file. The storage - service checks the hash of the content that has arrived with the hash - that was sent. This is primarily valuable for detecting bitflips on - the wire if using http instead of https as https (the default) will - already validate. Note that this MD5 hash is not stored with the - file. Also note that if enabled, the memory-efficient upload algorithm - will not be used, because computing the MD5 hash requires buffering - entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. - :keyword lease: - Required if the file has an active lease. 
Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A streaming object (StorageStreamDownloader) - :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START download_file] - :end-before: [END download_file] - :language: python - :dedent: 16 - :caption: Download a file. - """ - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if length is not None and offset is None: - raise ValueError("Offset value must not be None if length is set.") - - range_end = None - if length is not None: - range_end = offset + length - 1 # Service actually uses an end-range inclusive index - - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - - downloader = StorageStreamDownloader( - client=self._client.file, - config=self._config, - start_range=offset, - end_range=range_end, - encryption_options=None, - name=self.file_name, - path='/'.join(self.file_path), - share=self.share_name, - lease_access_conditions=access_conditions, - cls=deserialize_file_stream, - **kwargs - ) - await downloader._setup() # pylint: disable=protected-access - return downloader - - @distributed_trace_async - async def delete_file(self, **kwargs): - # type: (Any) -> None - """Marks the specified file for deletion. The file is - later deleted during garbage collection. - - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_client_async.py - :start-after: [START delete_file] - :end-before: [END delete_file] - :language: python - :dedent: 16 - :caption: Delete a file. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def rename_file( - self, new_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> ShareFileClient - """ - Rename the source file. - - :param str new_name: - The new file name. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword bool overwrite: - A boolean value for if the destination file already exists, whether this request will - overwrite the file or not. If true, the rename will succeed and will overwrite the - destination file. If not provided or if false and the destination file does exist, the - request will not overwrite the destination file. If provided and the destination file - doesn't exist, the rename will succeed. - :keyword bool ignore_read_only: - A boolean value that specifies whether the ReadOnly attribute on a preexisting destination - file should be respected. If true, the rename will succeed, otherwise, a previous file at the - destination with the ReadOnly attribute set will cause the rename to fail. 
- :keyword str file_permission: - If specified the permission (security descriptor) shall be set for the file. This header - can be used if Permission size is <= 8KB, else file_permission_key shall be used. - If SDDL is specified as input, it must have owner, group and dacl. - A value of 'preserve' can be passed to preserve source permissions. - Note: Only one of the file_permission or file_permission_key should be specified. - :keyword str file_permission_key: - Key of the permission to be set for the file. - Note: Only one of the file-permission or file-permission-key should be specified. - :keyword file_attributes: - The file system attributes for the file. - :paramtype file_attributes:~azure.storage.fileshare.NTFSAttributes or str - :keyword file_creation_time: - Creation time for the file. - :paramtype file_creation_time:~datetime.datetime or str - :keyword file_last_write_time: - Last write time for the file. - :paramtype file_last_write_time:~datetime.datetime or str - :keyword Dict[str,str] metadata: - A name-value pair to associate with a file storage object. - :keyword source_lease: - Required if the source file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype source_lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword destination_lease: - Required if the destination file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str - :returns: The new File Client. - :rtype: ~azure.storage.fileshare.ShareFileClient - """ - if not new_name: - raise ValueError("Please specify a new file name.") - - new_name = new_name.strip('/') - new_path_and_query = new_name.split('?') - new_file_path = new_path_and_query[0] - if len(new_path_and_query) == 2: - new_file_sas = new_path_and_query[1] or self._query_str.strip('?') - else: - new_file_sas = self._query_str.strip('?') - - new_file_client = ShareFileClient( - '{}://{}'.format(self.scheme, self.primary_hostname), self.share_name, new_file_path, - credential=new_file_sas or self.credential, api_version=self.api_version, - _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, - _location_mode=self._location_mode, require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function - ) - - kwargs.update(get_rename_smb_properties(kwargs)) - - timeout = kwargs.pop('timeout', None) - overwrite = kwargs.pop('overwrite', None) - metadata = kwargs.pop('metadata', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - - source_access_conditions = get_source_access_conditions(kwargs.pop('source_lease', None)) - dest_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None)) - - try: - await new_file_client._client.file.rename( # pylint: disable=protected-access - self.url, - timeout=timeout, - replace_if_exists=overwrite, - source_lease_access_conditions=source_access_conditions, - destination_lease_access_conditions=dest_access_conditions, - headers=headers, - **kwargs) - - return new_file_client - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_file_properties(self, **kwargs): - # type: (Any) -> FileProperties - """Returns all user-defined metadata, standard HTTP properties, and - system properties for the file. 
- - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: FileProperties - :rtype: ~azure.storage.fileshare.FileProperties - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - file_props = await self._client.file.get_properties( - sharesnapshot=self.snapshot, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=deserialize_file_properties, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - file_props.name = self.file_name - file_props.share = self.share_name - file_props.snapshot = self.snapshot - file_props.path = "/".join(self.file_path) - return file_props # type: ignore - - @distributed_trace_async - async def set_http_headers(self, content_settings, # type: ContentSettings - file_attributes="preserve", # type: Union[str, NTFSAttributes] - file_creation_time="preserve", # type: Union[str, datetime] - file_last_write_time="preserve", # type: Union[str, datetime] - file_permission=None, # type: Optional[str] - permission_key=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Dict[str, Any] - """Sets HTTP headers on the file. - - :param ~azure.storage.fileshare.ContentSettings content_settings: - ContentSettings object used to set file properties. Used to set content type, encoding, - language, disposition, md5, and cache control. - :param file_attributes: - The file system attributes for files and directories. - If not set, indicates preservation of existing values. - Here is an example for when the var type is str: 'Temporary|Archive' - :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` - :param file_creation_time: Creation time for the file - Default value: Preserve. - :type file_creation_time: str or ~datetime.datetime - :param file_last_write_time: Last write time for the file - Default value: Preserve. - :type file_last_write_time: str or ~datetime.datetime - :param file_permission: If specified the permission (security - descriptor) shall be set for the directory/file. This header can be - used if Permission size is <= 8KB, else x-ms-file-permission-key - header shall be used. Default value: Inherit. If SDDL is specified as - input, it must have owner, group and dacl. Note: Only one of the - x-ms-file-permission or x-ms-file-permission-key should be specified. - :type file_permission: str - :param permission_key: Key of the permission to be set for the - directory/file. Note: Only one of the x-ms-file-permission or - x-ms-file-permission-key should be specified. - :type permission_key: str - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - file_content_length = kwargs.pop("size", None) - file_http_headers = FileHTTPHeaders( - file_cache_control=content_settings.cache_control, - file_content_type=content_settings.content_type, - file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, - file_content_encoding=content_settings.content_encoding, - file_content_language=content_settings.content_language, - file_content_disposition=content_settings.content_disposition, - ) - file_permission = _get_file_permission(file_permission, permission_key, 'preserve') - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=file_content_length, - file_http_headers=file_http_headers, - file_attributes=_str(file_attributes), - file_creation_time=_datetime_to_str(file_creation_time), - file_last_write_time=_datetime_to_str(file_last_write_time), - file_permission=file_permission, - file_permission_key=permission_key, - lease_access_conditions=access_conditions, - timeout=timeout, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore - # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] - """Sets user-defined metadata for the specified file as one or more - name-value pairs. - - Each call to this operation replaces all existing metadata - attached to the file. To remove all metadata from the file, - call this operation with no metadata dict. - - :param metadata: - Name-value pairs associated with the file as metadata. - :type metadata: dict(str, str) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop("headers", {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.file.set_metadata( # type: ignore - metadata=metadata, lease_access_conditions=access_conditions, - timeout=timeout, cls=return_response_headers, headers=headers, **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range( # type: ignore - self, - data, # type: bytes - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Upload a range of bytes to a file. - - :param bytes data: - The data to upload. - :param int offset: - Start of byte range to use for uploading a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for uploading a section of the file. - The range can be up to 4 MB in size. - :keyword bool validate_content: - If true, calculates an MD5 hash of the page content. The storage - service checks the hash of the content that has arrived - with the hash that was sent. 
This is primarily valuable for detecting - bitflips on the wire if using http instead of https as https (the default) - will already validate. Note that this MD5 hash is not stored with the - file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str encoding: - Defaults to UTF-8. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - validate_content = kwargs.pop('validate_content', False) - timeout = kwargs.pop('timeout', None) - encoding = kwargs.pop('encoding', 'UTF-8') - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Encryption not supported.") - if isinstance(data, six.text_type): - data = data.encode(encoding) - end_range = offset + length - 1 # Reformat to an inclusive range index - content_range = 'bytes={0}-{1}'.format(offset, end_range) - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - try: - return await self._client.file.upload_range( # type: ignore - range=content_range, - content_length=length, - optionalbody=data, - timeout=timeout, - validate_content=validate_content, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def upload_range_from_url(self, source_url, - offset, - length, - source_offset, - **kwargs - ): - # type: (str, int, int, int, **Any) -> Dict[str, Any] - """ - Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. - - :param int offset: - Start of byte range to use for updating a section of the file. - The range can be up to 4 MB in size. - :param int length: - Number of bytes to use for updating a section of the file. - The range can be up to 4 MB in size. - :param str source_url: - A URL of up to 2 KB in length that specifies an Azure file or blob. - The value should be URL-encoded as it would appear in a request URI. - If the source is in another account, the source must either be public - or must be authenticated via a shared access signature. If the source - is public, no authentication is required. - Examples: - https://myaccount.file.core.windows.net/myshare/mydir/myfile - https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken - :param int source_offset: - This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. - The service will read the same number of bytes as the destination range (length-offset). - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword str source_authorization: - Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is - the prefix of the source_authorization string. 
- """ - options = self._upload_range_from_url_options( - source_url=source_url, - offset=offset, - length=length, - source_offset=source_offset, - **kwargs - ) - try: - return await self._client.file.upload_range_from_url(**options) # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_ranges( # type: ignore - self, offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List[Dict[str, int]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A list of valid ranges. - :rtype: List[dict[str, int]] - """ - options = self._get_ranges_options( - offset=offset, - length=length, - **kwargs) - try: - ranges = await self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] - - @distributed_trace_async - async def get_ranges_diff( # type: ignore - self, - previous_sharesnapshot, # type: Union[str, Dict[str, Any]] - offset=None, # type: Optional[int] - length=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] - """Returns the list of valid page ranges for a file or snapshot - of a file. - - .. versionadded:: 12.6.0 - - :param int offset: - Specifies the start offset of bytes over which to get ranges. - :param int length: - Number of bytes to use over which to get ranges. - :param str previous_sharesnapshot: - The snapshot diff parameter that contains an opaque DateTime value that - specifies a previous file snapshot to be compared - against a more recent snapshot or the current file. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. - The first element are filled file ranges, the 2nd element is cleared file ranges. - :rtype: tuple(list(dict(str, str), list(dict(str, str)) - """ - options = self._get_ranges_options( - offset=offset, - length=length, - previous_sharesnapshot=previous_sharesnapshot, - **kwargs) - try: - ranges = await self._client.file.get_range_list(**options) - except HttpResponseError as error: - process_storage_error(error) - return get_file_ranges_result(ranges) - - @distributed_trace_async - async def clear_range( # type: ignore - self, - offset, # type: int - length, # type: int - **kwargs - ): - # type: (...) -> Dict[str, Any] - """Clears the specified range and releases the space used in storage for - that range. - - :param int offset: - Start of byte range to use for clearing a section of the file. - The range can be up to 4 MB in size. 
- :param int length: - Number of bytes to use for clearing a section of the file. - The range can be up to 4 MB in size. - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if self.require_encryption or (self.key_encryption_key is not None): - raise ValueError("Unsupported method for encryption.") - - if offset is None or offset % 512 != 0: - raise ValueError("offset must be an integer that aligns with 512 bytes file size") - if length is None or length % 512 != 0: - raise ValueError("length must be an integer that aligns with 512 bytes file size") - end_range = length + offset - 1 # Reformat to an inclusive range index - content_range = "bytes={0}-{1}".format(offset, end_range) - try: - return await self._client.file.upload_range( # type: ignore - timeout=timeout, - cls=return_response_headers, - content_length=0, - optionalbody=None, - file_range_write="clear", - range=content_range, - lease_access_conditions=access_conditions, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def resize_file(self, size, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Resizes a file to the specified size. - - :param int size: - Size to resize file to (in bytes) - :keyword lease: - Required if the file has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.1.0 - - :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: File-updated property dict (Etag and last modified). - :rtype: Dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.file.set_http_headers( # type: ignore - file_content_length=size, - file_attributes="preserve", - file_creation_time="preserve", - file_last_write_time="preserve", - file_permission="preserve", - lease_access_conditions=access_conditions, - cls=return_response_headers, - timeout=timeout, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_handles(self, **kwargs): - # type: (Any) -> AsyncItemPaged - """Lists handles for file. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of HandleItem - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] - """ - timeout = kwargs.pop('timeout', None) - results_per_page = kwargs.pop("results_per_page", None) - command = functools.partial( - self._client.file.list_handles, - sharesnapshot=self.snapshot, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, results_per_page=results_per_page, - page_iterator_class=HandlesPaged) - - @distributed_trace_async - async def close_handle(self, handle, **kwargs): - # type: (Union[str, HandleItem], Any) -> Dict[str, int] - """Close an open file handle. - - :param handle: - A specific handle to close. 
- :type handle: str or ~azure.storage.fileshare.Handle - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - try: - handle_id = handle.id # type: ignore - except AttributeError: - handle_id = handle - if handle_id == '*': - raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") - try: - response = await self._client.file.force_close_handles( - handle_id, - marker=None, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - return { - 'closed_handles_count': response.get('number_of_handles_closed', 0), - 'failed_handles_count': response.get('number_of_handles_failed', 0) - } - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def close_all_handles(self, **kwargs): - # type: (Any) -> Dict[str, int] - """Close any open file handles. - - This operation will block until the service has closed all open handles. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: - The number of handles closed (this may be 0 if the specified handle was not found) - and the number of handles failed to close in a dict. - :rtype: dict[str, int] - """ - timeout = kwargs.pop('timeout', None) - start_time = time.time() - - try_close = True - continuation_token = None - total_closed = 0 - total_failed = 0 - while try_close: - try: - response = await self._client.file.force_close_handles( - handle_id='*', - timeout=timeout, - marker=continuation_token, - sharesnapshot=self.snapshot, - cls=return_response_headers, - **kwargs - ) - except HttpResponseError as error: - process_storage_error(error) - continuation_token = response.get('marker') - try_close = bool(continuation_token) - total_closed += response.get('number_of_handles_closed', 0) - total_failed += response.get('number_of_handles_failed', 0) - if timeout: - timeout = max(0, timeout - (time.time() - start_time)) - return { - 'closed_handles_count': total_closed, - 'failed_handles_count': total_failed - } diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_lease_async.py deleted file mode 100644 index 0d99845..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_lease_async.py +++ /dev/null @@ -1,228 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, - TypeVar, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.response_handlers import return_response_headers, process_storage_error -from .._generated.aio.operations import FileOperations, ShareOperations -from .._lease import ShareLeaseClient as LeaseClientBase - -if TYPE_CHECKING: - from datetime import datetime - ShareFileClient = TypeVar("ShareFileClient") - ShareClient = TypeVar("ShareClient") - - -class ShareLeaseClient(LeaseClientBase): - """Creates a new ShareLeaseClient. - - This client provides lease operations on a ShareClient or ShareFileClient. - - :ivar str id: - The ID of the lease currently being maintained. This will be `None` if no - lease has yet been acquired. - :ivar str etag: - The ETag of the lease currently being maintained. This will be `None` if no - lease has yet been acquired or modified. - :ivar ~datetime.datetime last_modified: - The last modified timestamp of the lease currently being maintained. - This will be `None` if no lease has yet been acquired or modified. - - :param client: - The client of the file or share to lease. - :type client: ~azure.storage.fileshare.ShareFileClient or - ~azure.storage.fileshare.ShareClient - :param str lease_id: - A string representing the lease ID of an existing lease. This value does not - need to be specified in order to acquire a new lease, or break one. - """ - - def __enter__(self): - raise TypeError("Async lease must use 'async with'.") - - def __exit__(self, *args): - self.release() - - async def __aenter__(self): - return self - - async def __aexit__(self, *args): - await self.release() - - @distributed_trace_async - async def acquire(self, **kwargs): - # type: (**Any) -> None - """Requests a new lease. This operation establishes and manages a lock on a - file or share for write and delete operations. If the file or share does not have an active lease, - the File or Share service creates a lease on the file or share. If the file has an active lease, - you can only request a new lease using the active lease ID. - - - If the file or share does not have an active lease, the File or Share service creates a - lease on the file and returns a new lease ID. - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be - between 15 and 60 seconds. A share lease duration cannot be changed - using renew or change. Default is -1 (infinite share lease). - - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - try: - lease_duration = kwargs.pop('lease_duration', -1) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.acquire_lease( - timeout=kwargs.pop('timeout', None), - duration=lease_duration, - proposed_lease_id=self.id, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - self.etag = response.get('etag') # type: str - - @distributed_trace_async - async def renew(self, **kwargs): - # type: (Any) -> None - """Renews the share lease. - - The share lease can be renewed if the lease ID specified in the - lease client matches that associated with the share. Note that - the lease may be renewed even if it has expired as long as the share - has not been leased again since the expiration of that lease. When you - renew a lease, the lease duration clock resets. - - .. versionadded:: 12.6.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - if isinstance(self._client, FileOperations): - raise TypeError("Lease renewal operations are only valid for ShareClient.") - try: - response = await self._client.renew_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - sharesnapshot=self._snapshot, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def release(self, **kwargs): - # type: (Any) -> None - """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the share or file. Releasing the lease allows another client to immediately acquire - the lease for the share or file as soon as the release is complete. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.release_lease( - lease_id=self.id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def change(self, proposed_lease_id, **kwargs): - # type: (str, Any) -> None - """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and - a new lease ID in x-ms-proposed-lease-id. - - :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File or Share service raises an error - (Invalid request) if the proposed lease ID is not in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :return: None - """ - try: - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - response = await self._client.change_lease( - lease_id=self.id, - proposed_lease_id=proposed_lease_id, - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - self.etag = response.get('etag') # type: str - self.id = response.get('lease_id') # type: str - self.last_modified = response.get('last_modified') # type: datetime - - @distributed_trace_async - async def break_lease(self, **kwargs): - # type: (Any) -> int - """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. An infinite lease breaks immediately. - - Once a lease is broken, it cannot be changed. Any authorized request can break the lease; - the request is not required to specify a matching lease ID. - When a lease is successfully broken, the response indicates the interval - in seconds until a new lease can be acquired. - - :keyword int lease_break_period: - This is the proposed duration of seconds that the share lease - should continue before it is broken, between 0 and 60 seconds. This - break period is only used if it is shorter than the time remaining - on the share lease. If longer, the time remaining on the share lease is used. - A new share lease will not be available before the break period has - expired, but the share lease may be held for longer than the break - period. If this header does not appear with a break - operation, a fixed-duration share lease breaks after the remaining share lease - period elapses, and an infinite share lease breaks immediately. - - .. versionadded:: 12.5.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :return: Approximate time remaining in the lease period, in seconds. - :rtype: int - """ - try: - lease_break_period = kwargs.pop('lease_break_period', None) - if self._snapshot: - kwargs['sharesnapshot'] = self._snapshot - if isinstance(self._client, ShareOperations): - kwargs['break_period'] = lease_break_period - if isinstance(self._client, FileOperations) and lease_break_period: - raise TypeError("Setting a lease break period is only applicable to Share leases.") - - response = await self._client.break_lease( - timeout=kwargs.pop('timeout', None), - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_models.py deleted file mode 100644 index e81133c..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_models.py +++ /dev/null @@ -1,178 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=too-few-public-methods, too-many-instance-attributes -# pylint: disable=super-init-not-called, too-many-lines - -from azure.core.async_paging import AsyncPageIterator -from azure.core.exceptions import HttpResponseError - -from .._shared.response_handlers import return_context_and_deserialized, process_storage_error -from .._generated.models import DirectoryItem -from .._models import Handle, ShareProperties, DirectoryProperties, FileProperties - - -def _wrap_item(item): - if isinstance(item, DirectoryItem): - return {'name': item.name, 'is_directory': True} - return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} - - -class SharePropertiesPaged(AsyncPageIterator): - """An iterable of Share properties. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.ShareProperties) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only shares whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(SharePropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class HandlesPaged(AsyncPageIterator): - """An iterable of Handles. - - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str marker: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". 
- :ivar current_page: The current page of listed results. - :vartype current_page: list(~azure.storage.fileshare.Handle) - - :param callable command: Function to retrieve the next page of items. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. - """ - def __init__(self, command, results_per_page=None, continuation_token=None): - super(HandlesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access - return self._response.next_marker or None, self.current_page - - -class DirectoryPropertiesPaged(AsyncPageIterator): - """An iterable for the contents of a directory. - - This iterable will yield dicts for the contents of the directory. The dicts - will have the keys 'name' (str) and 'is_directory' (bool). - Items that are files (is_directory=False) will have an additional 'content_length' key. - - :ivar str service_endpoint: The service URL. - :ivar str prefix: A file name prefix being used to filter the list. - :ivar str marker: The continuation token of the current page of results. - :ivar int results_per_page: The maximum number of results retrieved per API call. - :ivar str continuation_token: The continuation token to retrieve the next page of results. - :ivar str location_mode: The location mode being used to list results. The available - options include "primary" and "secondary". - :ivar current_page: The current page of listed results. - :vartype current_page: list(dict(str, Any)) - - :param callable command: Function to retrieve the next page of items. - :param str prefix: Filters the results to return only directories whose names - begin with the specified prefix. - :param int results_per_page: The maximum number of share names to retrieve per - call. - :param str continuation_token: An opaque continuation token. 
- """ - def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): - super(DirectoryPropertiesPaged, self).__init__( - get_next=self._get_next_cb, - extract_data=self._extract_data_cb, - continuation_token=continuation_token or "" - ) - self._command = command - self.service_endpoint = None - self.prefix = prefix - self.marker = None - self.results_per_page = results_per_page - self.location_mode = None - self.current_page = [] - - async def _get_next_cb(self, continuation_token): - try: - return await self._command( - marker=continuation_token or None, - prefix=self.prefix, - maxresults=self.results_per_page, - cls=return_context_and_deserialized, - use_location=self.location_mode) - except HttpResponseError as error: - process_storage_error(error) - - async def _extract_data_cb(self, get_next_return): - self.location_mode, self._response = get_next_return - self.service_endpoint = self._response.service_endpoint - self.prefix = self._response.prefix - self.marker = self._response.marker - self.results_per_page = self._response.max_results - self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access - self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access - return self._response.next_marker or None, self.current_page diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_share_client_async.py deleted file mode 100644 index 7aaefbc..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_share_client_async.py +++ /dev/null @@ -1,756 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -from typing import ( # pylint: disable=unused-import - Optional, Union, Dict, Any, Iterable, TYPE_CHECKING -) - -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.pipeline import AsyncPipeline -from .._shared.policies_async import ExponentialRetry -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.request_handlers import add_metadata_headers, serialize_iso -from .._shared.response_handlers import ( - return_response_headers, - process_storage_error, - return_headers_and_deserialized) -from .._generated.aio import AzureFileStorage -from .._generated.models import ( - SignedIdentifier, - DeleteSnapshotsOptionType) -from .._deserialize import deserialize_share_properties, deserialize_permission -from .._serialize import get_api_version, get_access_conditions -from .._share_client import ShareClient as ShareClientBase -from ._directory_client_async import ShareDirectoryClient -from ._file_client_async import ShareFileClient -from ..aio._lease_async import ShareLeaseClient -from .._models import ShareProtocols - -if TYPE_CHECKING: - from .._models import ShareProperties, AccessPolicy - - -class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): - """A client to interact with a specific share, although that share may not yet exist. - - For operations relating to a specific directory or file in this share, the clients for - those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. - - :param str account_url: - The URI to the storage account. In order to create a client given the full URI to the share, - use the :func:`from_share_url` classmethod. - :param share_name: - The name of the share with which to interact. - :type share_name: str - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - """ - def __init__( # type: ignore - self, account_url, # type: str - share_name, # type: str - snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) 
-> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareClient, self).__init__( - account_url, - share_name=share_name, - snapshot=snapshot, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._loop = loop - - def get_directory_client(self, directory_path=None): - # type: (Optional[str]) -> ShareDirectoryClient - """Get a client to interact with the specified directory. - The directory need not already exist. - - :param str directory_path: - Path to the specified directory. - :returns: A Directory Client. - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareDirectoryClient( - self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - def get_file_client(self, file_path): - # type: (str) -> ShareFileClient - """Get a client to interact with the specified file. - The file need not already exist. - - :param str file_path: - Path to the specified file. - :returns: A File Client. - :rtype: ~azure.storage.fileshare.aio.ShareFileClient - """ - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - - return ShareFileClient( - self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, - credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) - - @distributed_trace_async() - async def acquire_lease(self, **kwargs): - # type: (**Any) -> ShareLeaseClient - """Requests a new lease. - - If the share does not have an active lease, the Share - Service creates a lease on the share and returns a new lease. - - .. versionadded:: 12.5.0 - - :keyword int lease_duration: - Specifies the duration of the lease, in seconds, or negative one - (-1) for a lease that never expires. A non-infinite lease can be - between 15 and 60 seconds. A lease duration cannot be changed - using renew or change. Default is -1 (infinite lease). - :keyword str lease_id: - Proposed lease ID, in a GUID string format. The Share Service - returns 400 (Invalid request) if the proposed lease ID is not - in the correct format. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A ShareLeaseClient object. - :rtype: ~azure.storage.fileshare.ShareLeaseClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share.py - :start-after: [START acquire_lease_on_share] - :end-before: [END acquire_lease_on_share] - :language: python - :dedent: 8 - :caption: Acquiring a lease on a share. 
- """ - kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) - lease_id = kwargs.pop('lease_id', None) - lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore - await lease.acquire(**kwargs) - return lease - - @distributed_trace_async - async def create_share(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Creates a new Share under the account. If a share with the - same name already exists, the operation fails. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int quota: - The quota to be allotted. - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - - .. versionadded:: 12.4.0 - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword protocols: - Protocols to enable on the share. Only one protocol can be enabled on the share. - :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share] - :end-before: [END create_share] - :language: python - :dedent: 12 - :caption: Creates a file share. - """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - access_tier = kwargs.pop('access_tier', None) - timeout = kwargs.pop('timeout', None) - root_squash = kwargs.pop('root_squash', None) - protocols = kwargs.pop('protocols', None) - if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: - raise ValueError("The enabled protocol must be set to either SMB or NFS.") - if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: - raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - - try: - return await self._client.share.create( # type: ignore - timeout=timeout, - metadata=metadata, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - enabled_protocols=protocols, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_snapshot( # type: ignore - self, - **kwargs # type: Optional[Any] - ): - # type: (...) -> Dict[str, Any] - """Creates a snapshot of the share. - - A snapshot is a read-only version of a share that's taken at a point in time. - It can be read, copied, or deleted, but not modified. Snapshots provide a way - to back up a share as it appears at a moment in time. - - A snapshot of a share has the same name as the base share from which the snapshot - is taken, with a DateTime value appended to indicate the time at which the - snapshot was taken. - - :keyword dict(str,str) metadata: - Name-value pairs associated with the share as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). - :rtype: dict[str, Any] - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START create_share_snapshot] - :end-before: [END create_share_snapshot] - :language: python - :dedent: 16 - :caption: Creates a snapshot of the file share. - """ - metadata = kwargs.pop('metadata', None) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) # type: ignore - try: - return await self._client.share.create_snapshot( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def delete_share( - self, delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START delete_share] - :end-before: [END delete_share] - :language: python - :dedent: 16 - :caption: Deletes the share and any snapshots. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - delete_include = None - if delete_snapshots: - delete_include = DeleteSnapshotsOptionType.include - try: - await self._client.share.delete( - timeout=timeout, - sharesnapshot=self.snapshot, - delete_snapshots=delete_include, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_properties(self, **kwargs): - # type: (Any) -> ShareProperties - """Returns all user-defined metadata and system properties for the - specified share. The data returned does not include the shares's - list of files or directories. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: The share properties. - :rtype: ~azure.storage.fileshare.ShareProperties - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_hello_world_async.py - :start-after: [START get_share_properties] - :end-before: [END get_share_properties] - :language: python - :dedent: 16 - :caption: Gets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - props = await self._client.share.get_properties( - timeout=timeout, - sharesnapshot=self.snapshot, - cls=deserialize_share_properties, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - props.name = self.share_name - props.snapshot = self.snapshot - return props # type: ignore - - @distributed_trace_async - async def set_share_quota(self, quota, **kwargs): - # type: (int, Any) -> Dict[str, Any] - """Sets the quota for the share. 
- - :param int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_quota] - :end-before: [END set_share_quota] - :language: python - :dedent: 16 - :caption: Sets the share quota. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=None, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - async def set_share_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Sets the share properties. - - .. versionadded:: 12.3.0 - - :keyword access_tier: - Specifies the access tier of the share. - Possible values: 'TransactionOptimized', 'Hot', and 'Cool' - :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - :keyword int quota: - Specifies the maximum size of the share, in gigabytes. - Must be greater than 0, and less than or equal to 5TB. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword root_squash: - Root squash to set on the share. - Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash' - :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_properties] - :end-before: [END set_share_properties] - :language: python - :dedent: 16 - :caption: Sets the share properties. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - access_tier = kwargs.pop('access_tier', None) - quota = kwargs.pop('quota', None) - root_squash = kwargs.pop('root_squash', None) - if all(parameter is None for parameter in [access_tier, quota, root_squash]): - raise ValueError("set_share_properties should be called with at least one parameter.") - try: - return await self._client.share.set_properties( # type: ignore - timeout=timeout, - quota=quota, - access_tier=access_tier, - root_squash=root_squash, - lease_access_conditions=access_conditions, - cls=return_response_headers, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_share_metadata(self, metadata, **kwargs): - # type: (Dict[str, Any], Any) -> Dict[str, Any] - """Sets the metadata for the share. - - Each call to this operation replaces all existing metadata - attached to the share. To remove all metadata from the share, - call this operation with no metadata dict. 
- - :param metadata: - Name-value pairs associated with the share as metadata. - :type metadata: dict(str, str) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). - :rtype: dict(str, Any) - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START set_share_metadata] - :end-before: [END set_share_metadata] - :language: python - :dedent: 16 - :caption: Sets the share metadata. - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - headers = kwargs.pop('headers', {}) - headers.update(add_metadata_headers(metadata)) - try: - return await self._client.share.set_metadata( # type: ignore - timeout=timeout, - cls=return_response_headers, - headers=headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the permissions for the share. The permissions - indicate whether files in a share may be accessed publicly. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Access policy information in a dict. - :rtype: dict[str, Any] - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - response, identifiers = await self._client.share.get_access_policy( - timeout=timeout, - cls=return_headers_and_deserialized, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - return { - 'public_access': response.get('share_public_access'), - 'signed_identifiers': identifiers or [] - } - - @distributed_trace_async - async def set_share_access_policy(self, signed_identifiers, **kwargs): - # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] - """Sets the permissions for the share, or stored access - policies that may be used with Shared Access Signatures. The permissions - indicate whether files in a share may be accessed publicly. - - :param signed_identifiers: - A dictionary of access policies to associate with the share. The - dictionary may contain up to 5 elements. An empty dictionary - will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :returns: Share-updated property dict (Etag and last modified). 
- :rtype: dict(str, Any) - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - if len(signed_identifiers) > 5: - raise ValueError( - 'Too many access policies provided. The server does not support setting ' - 'more than 5 access policies on a single resource.') - identifiers = [] - for key, value in signed_identifiers.items(): - if value: - value.start = serialize_iso(value.start) - value.expiry = serialize_iso(value.expiry) - identifiers.append(SignedIdentifier(id=key, access_policy=value)) - signed_identifiers = identifiers # type: ignore - - try: - return await self._client.share.set_access_policy( # type: ignore - share_acl=signed_identifiers or None, - timeout=timeout, - cls=return_response_headers, - lease_access_conditions=access_conditions, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_share_stats(self, **kwargs): - # type: (Any) -> int - """Gets the approximate size of the data stored on the share in bytes. - - Note that this value may not include all recently created - or recently re-sized files. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :keyword lease: - Required if the share has an active lease. Value can be a ShareLeaseClient object - or the lease ID as a string. - - .. versionadded:: 12.5.0 - This keyword argument was introduced in API version '2020-08-04'. - - :return: The approximate size of the data (in bytes) stored on the share. - :rtype: int - """ - access_conditions = get_access_conditions(kwargs.pop('lease', None)) - timeout = kwargs.pop('timeout', None) - try: - stats = await self._client.share.get_statistics( - timeout=timeout, - lease_access_conditions=access_conditions, - **kwargs) - return stats.share_usage_bytes # type: ignore - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_directories_and_files( # type: ignore - self, directory_name=None, # type: Optional[str] - name_starts_with=None, # type: Optional[str] - marker=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> Iterable[Dict[str,str]] - """Lists the directories and files under the share. - - :param str directory_name: - Name of a directory. - :param str name_starts_with: - Filters the results to return only directories whose names - begin with the specified prefix. - :param str marker: - An opaque continuation token. This value can be retrieved from the - next_marker field of a previous generator object. If specified, - this generator will begin returning results from this point. - :keyword list[str] include: - Include this parameter to specify one or more datasets to include in the response. - Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword bool include_extended_info: - If this is set to true, file id will be returned in listed results. - - .. versionadded:: 12.6.0 - This keyword argument was introduced in API version '2020-10-02'. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_share_async.py - :start-after: [START share_list_files_in_dir] - :end-before: [END share_list_files_in_dir] - :language: python - :dedent: 16 - :caption: List directories and files in the share. - """ - timeout = kwargs.pop('timeout', None) - directory = self.get_directory_client(directory_name) - return directory.list_directories_and_files( - name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) - - @distributed_trace_async - async def create_permission_for_share(self, file_permission, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Create a permission (a security descriptor) at the share level. - - This 'permission' can be used for the files/directories in the share. - If a 'permission' already exists, it shall return the key of it, else - creates a new permission at the share level and return its key. - - :param str file_permission: - File permission, a Portable SDDL - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission key - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) - try: - return await self._client.share.create_permission(**options) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def get_permission_for_share( # type: ignore - self, permission_key, # type: str - **kwargs # type: Any - ): - # type: (...) -> str - """Get a permission (a security descriptor) for a given key. - - This 'permission' can be used for the files/directories in the share. - - :param str permission_key: - Key of the file permission to retrieve - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A file permission (a portable SDDL) - :rtype: str - """ - timeout = kwargs.pop('timeout', None) - try: - return await self._client.share.get_permission( # type: ignore - file_permission_key=permission_key, - cls=deserialize_permission, - timeout=timeout, - **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def create_directory(self, directory_name, **kwargs): - # type: (str, Any) -> ShareDirectoryClient - """Creates a directory in the share and returns a client to interact - with the directory. - - :param str directory_name: - The name of the directory. - :keyword dict(str,str) metadata: - Name-value pairs associated with the directory as metadata. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: ShareDirectoryClient - :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient - """ - directory = self.get_directory_client(directory_name) - kwargs.setdefault('merge_span', True) - await directory.create_directory(**kwargs) - return directory # type: ignore - - @distributed_trace_async - async def delete_directory(self, directory_name, **kwargs): - # type: (str, Any) -> None - """Marks the directory for deletion. The directory is - later deleted during garbage collection. - - :param str directory_name: - The name of the directory. - :keyword int timeout: - The timeout parameter is expressed in seconds. 
- :rtype: None - """ - directory = self.get_directory_client(directory_name) - await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_share_service_client_async.py deleted file mode 100644 index cb88f0a..0000000 --- a/azure/multiapi/storagev2/fileshare/v2021_04_10/aio/_share_service_client_async.py +++ /dev/null @@ -1,369 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method -import functools -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, - TYPE_CHECKING -) - -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.tracing.decorator import distributed_trace -from azure.core.pipeline import AsyncPipeline -from azure.core.tracing.decorator_async import distributed_trace_async - -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper -from .._shared.response_handlers import process_storage_error -from .._shared.policies_async import ExponentialRetry -from .._generated.aio import AzureFileStorage -from .._generated.models import StorageServiceProperties -from .._share_service_client import ShareServiceClient as ShareServiceClientBase -from .._serialize import get_api_version -from ._share_client_async import ShareClient -from ._models import SharePropertiesPaged -from .._models import service_properties_deserialize - -if TYPE_CHECKING: - from datetime import datetime - from .._shared.models import ResourceTypes, AccountSasPermissions - from .._models import ( - ShareProperties, - Metrics, - CorsRule, - ShareProtocolSettings, - ) - - -class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): - """A client to interact with the File Share Service at the account level. - - This client provides operations to retrieve and configure the account properties - as well as list, create and delete shares within the account. - For operations relating to a specific share, a client for that entity - can also be retrieved using the :func:`get_share_client` function. - - :param str account_url: - The URL to the file share storage account. Any other entities included - in the URL path (e.g. share or file) will be discarded. This URL can be optionally - authenticated with a SAS token. - :param credential: - The credential with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential from azure.core.credentials or an account - shared access key. - :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. - - .. versionadded:: 12.1.0 - - :keyword str secondary_hostname: - The hostname of the secondary endpoint. - :keyword loop: - The event loop to run the asynchronous tasks. - :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. - - .. admonition:: Example: - - .. 
literalinclude:: ../samples/file_samples_authentication_async.py - :start-after: [START create_share_service_client] - :end-before: [END create_share_service_client] - :language: python - :dedent: 8 - :caption: Create the share service client with url and credential. - """ - def __init__( - self, account_url, # type: str - credential=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> None - kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) - loop = kwargs.pop('loop', None) - super(ShareServiceClient, self).__init__( - account_url, - credential=credential, - loop=loop, - **kwargs) - self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._loop = loop - - @distributed_trace_async - async def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] - """Gets the properties of a storage account's File Share service, including - Azure Storage Analytics. - - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: A dictionary containing file service properties such as - analytics logging, hour/minute metrics, cors rules, etc. - :rtype: Dict[str, Any] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_service_properties] - :end-before: [END get_service_properties] - :language: python - :dedent: 12 - :caption: Get file share service properties. - """ - timeout = kwargs.pop('timeout', None) - try: - service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) - return service_properties_deserialize(service_props) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def set_service_properties( - self, hour_metrics=None, # type: Optional[Metrics] - minute_metrics=None, # type: Optional[Metrics] - cors=None, # type: Optional[List[CorsRule]] - protocol=None, # type: Optional[ShareProtocolSettings], - **kwargs - ): - # type: (...) -> None - """Sets the properties of a storage account's File Share service, including - Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the - existing settings on the service for that functionality are preserved. - - :param hour_metrics: - The hour metrics settings provide a summary of request - statistics grouped by API in hourly aggregates for files. - :type hour_metrics: ~azure.storage.fileshare.Metrics - :param minute_metrics: - The minute metrics settings provide request statistics - for each minute for files. - :type minute_metrics: ~azure.storage.fileshare.Metrics - :param cors: - You can include up to five CorsRule elements in the - list. If an empty list is specified, all CORS rules will be deleted, - and CORS will be disabled for the service. - :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) - :param protocol_settings: - Sets protocol settings - :type protocol: ~azure.storage.fileshare.ShareProtocolSettings - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START set_service_properties] - :end-before: [END set_service_properties] - :language: python - :dedent: 8 - :caption: Sets file share service properties. 
- """ - timeout = kwargs.pop('timeout', None) - props = StorageServiceProperties( - hour_metrics=hour_metrics, - minute_metrics=minute_metrics, - cors=cors, - protocol=protocol - ) - try: - await self._client.service.set_properties(props, timeout=timeout, **kwargs) - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def list_shares( - self, name_starts_with=None, # type: Optional[str] - include_metadata=False, # type: Optional[bool] - include_snapshots=False, # type: Optional[bool] - **kwargs # type: Any - ): # type: (...) -> AsyncItemPaged - """Returns auto-paging iterable of dict-like ShareProperties under the specified account. - The generator will lazily follow the continuation tokens returned by - the service and stop when all shares have been returned. - - :param str name_starts_with: - Filters the results to return only shares whose names - begin with the specified name_starts_with. - :param bool include_metadata: - Specifies that share metadata be returned in the response. - :param bool include_snapshots: - Specifies that share snapshot be returned in the response. - :keyword bool include_deleted: - Specifies that deleted shares be returned in the response. - This is only for share soft delete enabled account. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :returns: An iterable (auto-paging) of ShareProperties. - :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties] - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_list_shares] - :end-before: [END fsc_list_shares] - :language: python - :dedent: 16 - :caption: List shares in the file share service. - """ - timeout = kwargs.pop('timeout', None) - include = [] - if include_metadata: - include.append('metadata') - if include_snapshots: - include.append('snapshots') - include_deleted = kwargs.pop('include_deleted', None) - if include_deleted: - include.append("deleted") - - results_per_page = kwargs.pop('results_per_page', None) - command = functools.partial( - self._client.service.list_shares_segment, - include=include, - timeout=timeout, - **kwargs) - return AsyncItemPaged( - command, prefix=name_starts_with, results_per_page=results_per_page, - page_iterator_class=SharePropertiesPaged) - - @distributed_trace_async - async def create_share( - self, share_name, # type: str - **kwargs - ): - # type: (...) -> ShareClient - """Creates a new share under the specified account. If the share - with the same name already exists, the operation fails. Returns a client with - which to interact with the newly created share. - - :param str share_name: The name of the share to create. - :keyword dict(str,str) metadata: - A dict with name_value pairs to associate with the - share as metadata. Example:{'Category':'test'} - :keyword int quota: - Quota in bytes. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_create_shares] - :end-before: [END fsc_create_shares] - :language: python - :dedent: 12 - :caption: Create a share in the file share service. 
- """ - metadata = kwargs.pop('metadata', None) - quota = kwargs.pop('quota', None) - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) - return share - - @distributed_trace_async - async def delete_share( - self, share_name, # type: Union[ShareProperties, str] - delete_snapshots=False, # type: Optional[bool] - **kwargs - ): - # type: (...) -> None - """Marks the specified share for deletion. The share is - later deleted during garbage collection. - - :param share_name: - The share to delete. This can either be the name of the share, - or an instance of ShareProperties. - :type share_name: str or ~azure.storage.fileshare.ShareProperties - :param bool delete_snapshots: - Indicates if snapshots are to be deleted. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: None - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START fsc_delete_shares] - :end-before: [END fsc_delete_shares] - :language: python - :dedent: 16 - :caption: Delete a share in the file share service. - """ - timeout = kwargs.pop('timeout', None) - share = self.get_share_client(share_name) - kwargs.setdefault('merge_span', True) - await share.delete_share( - delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) - - @distributed_trace_async - async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): - # type: (str, str, **Any) -> ShareClient - """Restores soft-deleted share. - - Operation will only be successful if used within the specified number of days - set in the delete retention policy. - - .. versionadded:: 12.2.0 - This operation was introduced in API version '2019-12-12'. - - :param str deleted_share_name: - Specifies the name of the deleted share to restore. - :param str deleted_share_version: - Specifies the version of the deleted share to restore. - :keyword int timeout: - The timeout parameter is expressed in seconds. - :rtype: ~azure.storage.fileshare.aio.ShareClient - """ - share = self.get_share_client(deleted_share_name) - try: - await share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access - deleted_share_version=deleted_share_version, - timeout=kwargs.pop('timeout', None), **kwargs) - return share - except HttpResponseError as error: - process_storage_error(error) - - def get_share_client(self, share, snapshot=None): - # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient - """Get a client to interact with the specified share. - The share need not already exist. - - :param share: - The share. This can either be the name of the share, - or an instance of ShareProperties. - :type share: str or ~azure.storage.fileshare.ShareProperties - :param str snapshot: - An optional share snapshot on which to operate. This can be the snapshot ID string - or the response returned from :func:`create_snapshot`. - :returns: A ShareClient. - :rtype: ~azure.storage.fileshare.aio.ShareClient - - .. admonition:: Example: - - .. literalinclude:: ../samples/file_samples_service_async.py - :start-after: [START get_share_client] - :end-before: [END get_share_client] - :language: python - :dedent: 8 - :caption: Gets the share client. 
- """ - try: - share_name = share.name - except AttributeError: - share_name = share - - _pipeline = AsyncPipeline( - transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access - policies=self._pipeline._impl_policies # pylint: disable = protected-access - ) - return ShareClient( - self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, - api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, - _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/fileshare/v2021_04_10/py.typed b/azure/multiapi/storagev2/fileshare/v2021_04_10/py.typed deleted file mode 100644 index e69de29..0000000 diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/__init__.py b/azure/multiapi/storagev2/queue/v2019_07_07/__init__.py similarity index 100% rename from azure/multiapi/storagev2/queue/v2021_02_12/__init__.py rename to azure/multiapi/storagev2/queue/v2019_07_07/__init__.py diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_deserialize.py b/azure/multiapi/storagev2/queue/v2019_07_07/_deserialize.py similarity index 90% rename from azure/multiapi/storagev2/queue/v2021_02_12/_deserialize.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_deserialize.py index 7eda92e..893e255 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_deserialize.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_deserialize.py @@ -12,7 +12,7 @@ def deserialize_metadata(response, obj, headers): - raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} return {k[10:]: v for k, v in raw_metadata.items()} @@ -26,7 +26,6 @@ def deserialize_queue_properties(response, obj, headers): def deserialize_queue_creation(response, obj, headers): - response = response.http_response if response.status_code == 204: error_code = StorageErrorCode.queue_already_exists error = ResourceExistsError( diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/__init__.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/__init__.py similarity index 86% rename from azure/multiapi/storagev2/blob/v2019_02_02/_generated/__init__.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/__init__.py index f5c8f4a..1ee3b75 100644 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_generated/__init__.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/__init__.py @@ -9,8 +9,8 @@ # regenerated. 
# -------------------------------------------------------------------------- -from ._azure_blob_storage import AzureBlobStorage -__all__ = ['AzureBlobStorage'] +from ._azure_queue_storage import AzureQueueStorage +__all__ = ['AzureQueueStorage'] from .version import VERSION diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_data_lake_storage_client.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_azure_queue_storage.py similarity index 59% rename from azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_data_lake_storage_client.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/_azure_queue_storage.py index dcc65ad..e441ecb 100644 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/_data_lake_storage_client.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_azure_queue_storage.py @@ -12,50 +12,51 @@ from azure.core import PipelineClient from msrest import Serializer, Deserializer -from ._configuration import DataLakeStorageClientConfiguration +from ._configuration import AzureQueueStorageConfiguration from azure.core.exceptions import map_error from .operations import ServiceOperations -from .operations import FileSystemOperations -from .operations import PathOperations +from .operations import QueueOperations +from .operations import MessagesOperations +from .operations import MessageIdOperations from . import models -class DataLakeStorageClient(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. +class AzureQueueStorage(object): + """AzureQueueStorage :ivar service: Service operations - :vartype service: azure.storage.filedatalake.operations.ServiceOperations - :ivar file_system: FileSystem operations - :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations - :ivar path: Path operations - :vartype path: azure.storage.filedatalake.operations.PathOperations + :vartype service: azure.storage.queue.operations.ServiceOperations + :ivar queue: Queue operations + :vartype queue: azure.storage.queue.operations.QueueOperations + :ivar messages: Messages operations + :vartype messages: azure.storage.queue.operations.MessagesOperations + :ivar message_id: MessageId operations + :vartype message_id: azure.storage.queue.operations.MessageIdOperations - :param url: The URL of the service account, container, or blob that is the + :param url: The URL of the service account, queue or message that is the targe of the desired operation. :type url: str - :param file_system: The filesystem identifier. - :type file_system: str - :param path1: The file or directory path. 
- :type path1: str """ - def __init__(self, url, file_system, path1, **kwargs): + def __init__(self, url, **kwargs): base_url = '{url}' - self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs) + self._config = AzureQueueStorageConfiguration(url, **kwargs) self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-12-12' + self.api_version = '2018-03-28' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self.service = ServiceOperations( self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( + self.queue = QueueOperations( self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( + self.messages = MessagesOperations( + self._client, self._config, self._serialize, self._deserialize) + self.message_id = MessageIdOperations( self._client, self._config, self._serialize, self._deserialize) def close(self): diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_configuration.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_configuration.py similarity index 85% rename from azure/multiapi/storagev2/blob/v2020_02_10/_generated/_configuration.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/_configuration.py index c8a1875..75443cb 100644 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/_configuration.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/_configuration.py @@ -15,12 +15,12 @@ from .version import VERSION -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage +class AzureQueueStorageConfiguration(Configuration): + """Configuration for AzureQueueStorage Note that all parameters used to create this instance are saved as instance attributes. - :param url: The URL of the service account, container, or blob that is the + :param url: The URL of the service account, queue or message that is the targe of the desired operation. :type url: str :ivar version: Specifies the version of the operation to use for this @@ -33,14 +33,14 @@ def __init__(self, url, **kwargs): if url is None: raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) + super(AzureQueueStorageConfiguration, self).__init__(**kwargs) self._configure(**kwargs) - self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION)) + self.user_agent_policy.add_user_agent('azsdk-python-azurequeuestorage/{}'.format(VERSION)) self.generate_client_request_id = True self.url = url - self.version = "2020-02-10" + self.version = "2018-03-28" def _configure(self, **kwargs): self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/__init__.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/__init__.py similarity index 84% rename from azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/__init__.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/__init__.py index 009c965..a6c00c2 100644 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/aio/__init__.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/__init__.py @@ -9,5 +9,5 @@ # regenerated. 
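The renamed generated client above (``AzureQueueStorage``, pinned to service version ``2018-03-28``) is internal plumbing; consumers go through the public ``QueueClient`` re-exported by the vendored package. A hedged sketch, assuming the ``v2019_07_07`` queue profile added in 1.0.0 exposes the same surface as ``azure-storage-queue`` 12.x and using a placeholder connection string::

    from azure.multiapi.storagev2.queue.v2019_07_07 import QueueClient

    queue = QueueClient.from_connection_string("<connection-string>", queue_name="tasks")
    queue.create_queue()
    queue.send_message("hello")
    for message in queue.receive_messages():
        print(message.content)
        queue.delete_message(message)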
# -------------------------------------------------------------------------- -from ._azure_blob_storage_async import AzureBlobStorage -__all__ = ['AzureBlobStorage'] +from ._azure_queue_storage_async import AzureQueueStorage +__all__ = ['AzureQueueStorage'] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_data_lake_storage_client_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_azure_queue_storage_async.py similarity index 59% rename from azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_data_lake_storage_client_async.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_azure_queue_storage_async.py index 3486d5c..9d80360 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_generated/aio/_data_lake_storage_client_async.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_azure_queue_storage_async.py @@ -12,51 +12,52 @@ from azure.core import AsyncPipelineClient from msrest import Serializer, Deserializer -from ._configuration_async import DataLakeStorageClientConfiguration +from ._configuration_async import AzureQueueStorageConfiguration from azure.core.exceptions import map_error from .operations_async import ServiceOperations -from .operations_async import FileSystemOperations -from .operations_async import PathOperations +from .operations_async import QueueOperations +from .operations_async import MessagesOperations +from .operations_async import MessageIdOperations from .. import models -class DataLakeStorageClient(object): - """Azure Data Lake Storage provides storage for Hadoop and other big data workloads. +class AzureQueueStorage(object): + """AzureQueueStorage :ivar service: Service operations - :vartype service: azure.storage.filedatalake.aio.operations_async.ServiceOperations - :ivar file_system: FileSystem operations - :vartype file_system: azure.storage.filedatalake.aio.operations_async.FileSystemOperations - :ivar path: Path operations - :vartype path: azure.storage.filedatalake.aio.operations_async.PathOperations + :vartype service: azure.storage.queue.aio.operations_async.ServiceOperations + :ivar queue: Queue operations + :vartype queue: azure.storage.queue.aio.operations_async.QueueOperations + :ivar messages: Messages operations + :vartype messages: azure.storage.queue.aio.operations_async.MessagesOperations + :ivar message_id: MessageId operations + :vartype message_id: azure.storage.queue.aio.operations_async.MessageIdOperations - :param url: The URL of the service account, container, or blob that is the + :param url: The URL of the service account, queue or message that is the targe of the desired operation. :type url: str - :param file_system: The filesystem identifier. - :type file_system: str - :param path1: The file or directory path. 
- :type path1: str """ def __init__( - self, url, file_system, path1, **kwargs): + self, url, **kwargs): base_url = '{url}' - self._config = DataLakeStorageClientConfiguration(url, file_system, path1, **kwargs) + self._config = AzureQueueStorageConfiguration(url, **kwargs) self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2020-02-10' + self.api_version = '2018-03-28' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self.service = ServiceOperations( self._client, self._config, self._serialize, self._deserialize) - self.file_system = FileSystemOperations( + self.queue = QueueOperations( self._client, self._config, self._serialize, self._deserialize) - self.path = PathOperations( + self.messages = MessagesOperations( + self._client, self._config, self._serialize, self._deserialize) + self.message_id = MessageIdOperations( self._client, self._config, self._serialize, self._deserialize) async def close(self): diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/_configuration_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_configuration_async.py similarity index 86% rename from azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/_configuration_async.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_configuration_async.py index a500a0c..5cbe3a2 100644 --- a/azure/multiapi/storagev2/blob/v2019_12_12/_generated/aio/_configuration_async.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/_configuration_async.py @@ -15,12 +15,12 @@ from ..version import VERSION -class AzureBlobStorageConfiguration(Configuration): - """Configuration for AzureBlobStorage +class AzureQueueStorageConfiguration(Configuration): + """Configuration for AzureQueueStorage Note that all parameters used to create this instance are saved as instance attributes. - :param url: The URL of the service account, container, or blob that is the + :param url: The URL of the service account, queue or message that is the targe of the desired operation. 
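The async ``__init__`` above mirrors the synchronous client earlier in this patch: ``AzureQueueStorage`` takes only a resource URL and wires up the ``service``, ``queue``, ``messages`` and ``message_id`` operation groups. A minimal sketch of driving it, assuming the module path introduced by this patch and a placeholder SAS token appended to the account URL for authorization::

    import asyncio
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.aio import AzureQueueStorage

    async def main():
        # Placeholder account URL; the SAS token in the query string is assumed to
        # handle authorization, since the generated client only receives a URL.
        client = AzureQueueStorage("https://myaccount.queue.core.windows.net/?<sas-token>")
        try:
            # Operation groups are plain attributes wired up in __init__ above.
            props = await client.service.get_properties()
            print(props)
        finally:
            await client.close()

    asyncio.run(main())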
:type url: str :ivar version: Specifies the version of the operation to use for this @@ -33,15 +33,15 @@ def __init__(self, url, **kwargs): if url is None: raise ValueError("Parameter 'url' must not be None.") - super(AzureBlobStorageConfiguration, self).__init__(**kwargs) + super(AzureQueueStorageConfiguration, self).__init__(**kwargs) self._configure(**kwargs) - self.user_agent_policy.add_user_agent('azsdk-python-azureblobstorage/{}'.format(VERSION)) + self.user_agent_policy.add_user_agent('azsdk-python-azurequeuestorage/{}'.format(VERSION)) self.generate_client_request_id = True self.accept_language = None self.url = url - self.version = "2019-12-12" + self.version = "2018-03-28" def _configure(self, **kwargs): self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/__init__.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/__init__.py similarity index 69% rename from azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/__init__.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/__init__.py index 1190e52..6d9d667 100644 --- a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_generated/aio/operations_async/__init__.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/__init__.py @@ -10,11 +10,13 @@ # -------------------------------------------------------------------------- from ._service_operations_async import ServiceOperations -from ._file_system_operations_async import FileSystemOperations -from ._path_operations_async import PathOperations +from ._queue_operations_async import QueueOperations +from ._messages_operations_async import MessagesOperations +from ._message_id_operations_async import MessageIdOperations __all__ = [ 'ServiceOperations', - 'FileSystemOperations', - 'PathOperations', + 'QueueOperations', + 'MessagesOperations', + 'MessageIdOperations', ] diff --git a/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_message_id_operations_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_message_id_operations_async.py new file mode 100644 index 0000000..1213424 --- /dev/null +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_message_id_operations_async.py @@ -0,0 +1,184 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from ... import models + + +class MessageIdOperations: + """MessageIdOperations async operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + + async def update(self, pop_receipt, visibilitytimeout, queue_message=None, timeout=None, request_id=None, *, cls=None, **kwargs): + """The Update operation was introduced with version 2011-08-18 of the + Queue service API. The Update Message operation updates the visibility + timeout of a message. You can also use this operation to update the + contents of a message. A message must be in a format that can be + included in an XML request with UTF-8 encoding, and the encoded message + can be up to 64KB in size. + + :param pop_receipt: Required. Specifies the valid pop receipt value + returned from an earlier call to the Get Messages or Update Message + operation. + :type pop_receipt: str + :param visibilitytimeout: Optional. Specifies the new visibility + timeout value, in seconds, relative to server time. The default value + is 30 seconds. A specified value must be larger than or equal to 1 + second, and cannot be larger than 7 days, or larger than 2 hours on + REST protocol versions prior to version 2011-08-18. The visibility + timeout of a message can be set to a value later than the expiry time. + :type visibilitytimeout: int + :param queue_message: A Message object which can be stored in a Queue + :type queue_message: ~azure.storage.queue.models.QueueMessage + :param timeout: The The timeout parameter is expressed in seconds. For + more information, see None: + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + + self._config = config + self.peekonly = "true" + + async def dequeue(self, number_of_messages=None, visibilitytimeout=None, timeout=None, request_id=None, *, cls=None, **kwargs): + """The Dequeue operation retrieves one or more messages from the front of + the queue. + + :param number_of_messages: Optional. A nonzero integer value that + specifies the number of messages to retrieve from the queue, up to a + maximum of 32. If fewer are visible, the visible messages are + returned. By default, a single message is retrieved from the queue + with this operation. + :type number_of_messages: int + :param visibilitytimeout: Optional. Specifies the new visibility + timeout value, in seconds, relative to server time. The default value + is 30 seconds. A specified value must be larger than or equal to 1 + second, and cannot be larger than 7 days, or larger than 2 hours on + REST protocol versions prior to version 2011-08-18. The visibility + timeout of a message can be set to a value later than the expiry time. + :type visibilitytimeout: int + :param timeout: The The timeout parameter is expressed in seconds. For + more information, see None: self._config = config - async def set_properties(self, storage_service_properties, timeout=None, request_id=None, *, cls=None, **kwargs): - """Sets properties for a storage account's Blob service endpoint, - including properties for Storage Analytics and CORS (Cross-Origin - Resource Sharing) rules. + async def create(self, timeout=None, metadata=None, request_id=None, *, cls=None, **kwargs): + """creates a new queue under the given account. - :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.blob.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. 
For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param metadata: Optional. Include this parameter to specify that the + queue's metadata be returned as part of the response body. Note that + metadata requested with this parameter must be stored in accordance + with the naming restrictions imposed by the 2009-09-19 version of the + Queue service. Beginning with this version, all metadata names must + adhere to the naming conventions for C# identifiers. + :type metadata: str :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. @@ -57,14 +59,11 @@ async def set_properties(self, storage_service_properties, timeout=None, request :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - # Construct URL - url = self.set_properties.metadata['url'] + url = self.create.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -74,47 +73,41 @@ async def set_properties(self, storage_service_properties, timeout=None, request query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) + request = self._client.put(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response - if response.status_code not in [202]: + if response.status_code not in [201, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) if cls: response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} + create.metadata = 
{'url': '/{queueName}'} - async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """gets the properties of a storage account's Blob service, including - properties for Storage Analytics and CORS (Cross-Origin Resource - Sharing) rules. + async def delete(self, timeout=None, request_id=None, *, cls=None, **kwargs): + """operation permanently deletes the specified queue. - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -122,17 +115,14 @@ async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwa :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties + :return: None or the result of cls(response) + :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - # Construct URL - url = self.get_properties.metadata['url'] + url = self.delete.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -142,51 +132,40 @@ async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwa query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} - header_parameters['Accept'] = 'application/xml' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) + request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + if cls: + response_headers = { 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': 
self._deserialize('str', response.headers.get('x-ms-error-code')), } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{queueName}'} - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwargs): - """Retrieves statistics related to replication for the Blob service. It is - only available on the secondary location endpoint when read-access - geo-redundant replication is enabled for the storage account. + async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs): + """Retrieves user-defined metadata and queue properties on the specified + queue. Metadata is associated with the queue as name-values pairs. - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -194,17 +173,16 @@ async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwa :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats + :return: None or the result of cls(response) + :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "stats" + comp = "metadata" # Construct URL - url = self.get_statistics.metadata['url'] + url = self.get_properties.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -214,12 +192,10 @@ async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwa query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} - header_parameters['Accept'] = 'application/xml' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') @@ -233,74 +209,50 @@ async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwa map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceStats', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-approximate-messages-count': self._deserialize('int', 
response.headers.get('x-ms-approximate-messages-count')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{queueName}'} - if cls: - return cls(response, deserialized, header_dict) + async def set_metadata(self, timeout=None, metadata=None, request_id=None, *, cls=None, **kwargs): + """sets user-defined metadata on the specified queue. Metadata is + associated with the queue as name-value pairs. - return deserialized - get_statistics.metadata = {'url': '/'} - - async def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, *, cls=None, **kwargs): - """The List Containers Segment operation returns a list of the containers - under the specified account. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's - metadata be returned as part of the response body. Possible values - include: 'metadata' - :type include: str or - ~azure.storage.blob.models.ListContainersIncludeType - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param metadata: Optional. Include this parameter to specify that the + queue's metadata be returned as part of the response body. Note that + metadata requested with this parameter must be stored in accordance + with the naming restrictions imposed by the 2009-09-19 version of the + Queue service. Beginning with this version, all metadata names must + adhere to the naming conventions for C# identifiers. + :type metadata: str :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
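``create``, ``get_properties`` and ``delete`` above (with ``set_metadata`` following) form the queue-level lifecycle on the ``queue`` operation group; these calls return nothing unless a ``cls`` callback is supplied to capture the response headers. A rough sketch, assuming a queue-scoped placeholder URL with a SAS token for authorization::

    import asyncio
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.aio import AzureQueueStorage

    async def main():
        # Placeholder queue URL; the SAS token is assumed to handle authorization.
        client = AzureQueueStorage("https://myaccount.queue.core.windows.net/myqueue?<sas-token>")
        try:
            await client.queue.create()
            # cls receives (response, deserialized, response_headers); returning the
            # headers exposes x-ms-meta and x-ms-approximate-messages-count.
            headers = await client.queue.get_properties(
                cls=lambda response, deserialized, response_headers: response_headers)
            print(headers.get('x-ms-approximate-messages-count'))
            await client.queue.delete()
        finally:
            await client.close()

    asyncio.run(main())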
:type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :return: None or the result of cls(response) + :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - comp = "list" + comp = "metadata" # Construct URL - url = self.list_containers_segment.metadata['url'] + url = self.set_metadata.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -308,61 +260,45 @@ async def list_containers_segment(self, prefix=None, marker=None, maxresults=Non # Construct parameters query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, 'ListContainersIncludeType') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} - header_parameters['Accept'] = 'application/xml' + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) + request = self._client.put(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListContainersSegmentResponse', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + if cls: + response_headers = { 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{queueName}'} - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_containers_segment.metadata = {'url': '/'} - - async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, *, cls=None, **kwargs): - """Retrieves a user delegation key for the Blob service. 
This is only a - valid operation when using bearer token authentication. + async def get_access_policy(self, timeout=None, request_id=None, *, cls=None, **kwargs): + """returns details about any stored access policies specified on the queue + that may be used with Shared Access Signatures. - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -370,17 +306,16 @@ async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey + :return: list or the result of cls(response) + :rtype: list[~azure.storage.queue.models.SignedIdentifier] :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "userdelegationkey" + comp = "acl" # Construct URL - url = self.get_user_delegation_key.metadata['url'] + url = self.get_access_policy.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -390,22 +325,17 @@ async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - # Construct body - body_content = self._serialize.body(key_info, 'KeyInfo') - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) + request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response @@ -416,9 +346,8 @@ async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, header_dict = {} deserialized = None if response.status_code == 200: - deserialized = self._deserialize('UserDelegationKey', response) + deserialized = self._deserialize('[SignedIdentifier]', response) header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), @@ -429,76 +358,18 @@ async def get_user_delegation_key(self, key_info, timeout=None, request_id=None, return cls(response, deserialized, header_dict) return deserialized - get_user_delegation_key.metadata = {'url': '/'} - - async def get_account_info(self, *, cls=None, **kwargs): - """Returns the sku name and account kind . - - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + get_access_policy.metadata = {'url': '/{queueName}'} - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) + async def set_access_policy(self, queue_acl=None, timeout=None, request_id=None, *, cls=None, **kwargs): + """sets stored access policies for the queue that may be used with Shared + Access Signatures. - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/'} - - async def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, *, cls=None, **kwargs): - """The Batch operation allows multiple API calls to be embedded into a - single HTTP request. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must - be multipart/mixed with a batch boundary. Example header value: - multipart/mixed; boundary=batch_ - :type multipart_content_type: str - :param timeout: The timeout parameter is expressed in seconds. 
For + :param queue_acl: the acls for the queue + :type queue_acl: list[~azure.storage.queue.models.SignedIdentifier] + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -506,16 +377,16 @@ async def submit_batch(self, body, content_length, multipart_content_type, timeo :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: object or the result of cls(response) - :rtype: Generator + :return: None or the result of cls(response) + :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - comp = "batch" + comp = "acl" # Construct URL - url = self.submit_batch.metadata['url'] + url = self.set_access_policy.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -529,39 +400,33 @@ async def submit_batch(self, body, content_length, multipart_content_type, timeo # Construct headers header_parameters = {} - header_parameters['Accept'] = 'application/xml' header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct body + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} + if queue_acl is not None: + body_content = self._serialize.body(queue_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) + else: + body_content = None # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response - if response.status_code not in [200]: - await response.load_body() + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + if cls: + response_headers = { 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': 
self._deserialize('str', response.headers.get('x-ms-error-code')), } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - submit_batch.metadata = {'url': '/'} + return cls(response, None, response_headers) + set_access_policy.metadata = {'url': '/{queueName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py similarity index 53% rename from azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py index c4e40f1..4738600 100644 --- a/azure/multiapi/storagev2/fileshare/v2020_02_10/_generated/aio/operations_async/_service_operations_async.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/aio/operations_async/_service_operations_async.py @@ -37,25 +37,29 @@ def __init__(self, client, config, serializer, deserializer) -> None: self._config = config self.restype = "service" - async def set_properties(self, storage_service_properties, timeout=None, *, cls=None, **kwargs): - """Sets properties for a storage account's File service endpoint, - including properties for Storage Analytics metrics and CORS - (Cross-Origin Resource Sharing) rules. + async def set_properties(self, storage_service_properties, timeout=None, request_id=None, *, cls=None, **kwargs): + """Sets properties for a storage account's Queue service endpoint, + including properties for Storage Analytics and CORS (Cross-Origin + Resource Sharing) rules. :param storage_service_properties: The StorageService properties. :type storage_service_properties: - ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For + ~azure.storage.queue.models.StorageServiceProperties + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for File Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
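``get_access_policy`` and ``set_access_policy`` round-trip the queue's stored access policies as a list of ``SignedIdentifier`` elements wrapped in a ``SignedIdentifiers`` XML element, as the serialization context above shows. A hedged sketch; the ``SignedIdentifier`` constructor fields (``id`` and ``access_policy``) are an assumption, since that model is not shown in this hunk::

    import asyncio
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.aio import AzureQueueStorage
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.models import (
        AccessPolicy,
        SignedIdentifier,
    )

    async def main():
        # Placeholder queue URL with a SAS token for authorization (assumption).
        client = AzureQueueStorage("https://myaccount.queue.core.windows.net/myqueue?<sas-token>")
        try:
            policy = AccessPolicy(
                start="2021-01-01T00:00:00Z",
                expiry="2021-01-02T00:00:00Z",
                permission="rp",  # placeholder permission string
            )
            # SignedIdentifier fields are assumed to be id / access_policy.
            identifier = SignedIdentifier(id="policy-1", access_policy=policy)
            await client.queue.set_access_policy(queue_acl=[identifier])
            acl = await client.queue.get_access_policy()
            print(len(acl))
        finally:
            await client.close()

    asyncio.run(main())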
+ :type request_id: str :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) comp = "properties" @@ -78,6 +82,8 @@ async def set_properties(self, storage_service_properties, timeout=None, *, cls= header_parameters = {} header_parameters['Content-Type'] = 'application/xml; charset=utf-8' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct body body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') @@ -100,22 +106,26 @@ async def set_properties(self, storage_service_properties, timeout=None, *, cls= return cls(response, None, response_headers) set_properties.metadata = {'url': '/'} - async def get_properties(self, timeout=None, *, cls=None, **kwargs): - """Gets the properties of a storage account's File service, including - properties for Storage Analytics metrics and CORS (Cross-Origin - Resource Sharing) rules. + async def get_properties(self, timeout=None, request_id=None, *, cls=None, **kwargs): + """gets the properties of a storage account's Queue service, including + properties for Storage Analytics and CORS (Cross-Origin Resource + Sharing) rules. - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for File Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str :param callable cls: A custom type or function that will be passed the direct response :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :rtype: ~azure.storage.queue.models.StorageServiceProperties :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) comp = "properties" @@ -138,6 +148,8 @@ async def get_properties(self, timeout=None, *, cls=None, **kwargs): header_parameters = {} header_parameters['Accept'] = 'application/xml' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) @@ -164,45 +176,126 @@ async def get_properties(self, timeout=None, *, cls=None, **kwargs): return deserialized get_properties.metadata = {'url': '/'} - async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, *, cls=None, **kwargs): - """The List Shares Segment operation returns a list of the shares and - share snapshots under the specified account. 
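For the Queue service-level ``set_properties``/``get_properties`` pair, the simplest exercise is a read/write-back round trip: fetch the current analytics and CORS settings and send them straight back. A sketch against an account-scoped placeholder SAS URL::

    import asyncio
    from azure.multiapi.storagev2.queue.v2019_07_07._generated.aio import AzureQueueStorage

    async def main():
        # Account-level URL (placeholder); service operations post to '/'.
        client = AzureQueueStorage("https://myaccount.queue.core.windows.net/?<sas-token>")
        try:
            props = await client.service.get_properties(request_id="example-request-id")
            # Writing the same StorageServiceProperties back is a no-op round trip.
            await client.service.set_properties(props, request_id="example-request-id")
        finally:
            await client.close()

    asyncio.run(main())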
+ async def get_statistics(self, timeout=None, request_id=None, *, cls=None, **kwargs): + """Retrieves statistics related to replication for the Queue service. It + is only available on the secondary location endpoint when read-access + geo-redundant replication is enabled for the storage account. + + :param timeout: The The timeout parameter is expressed in seconds. For + more information, see Setting - Timeouts for File Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: ListSharesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :return: ListQueuesSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.queue.models.ListQueuesSegmentResponse :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) comp = "list" # Construct URL - url = self.list_shares_segment.metadata['url'] + url = self.list_queues_segment.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -217,7 +310,7 @@ async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, i if maxresults is not None: query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') + query_parameters['include'] = self._serialize.query("include", include, '[ListQueuesIncludeType]', div=',') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') @@ -226,6 +319,8 @@ async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, i header_parameters = {} header_parameters['Accept'] = 'application/xml' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) @@ -239,10 +334,11 @@ async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, i header_dict = {} deserialized = None if response.status_code == 200: - deserialized = self._deserialize('ListSharesResponse', response) + deserialized = self._deserialize('ListQueuesSegmentResponse', response) header_dict = { 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } @@ -250,4 +346,4 @@ async def list_shares_segment(self, prefix=None, marker=None, maxresults=None, i return cls(response, deserialized, header_dict) return deserialized - 
list_shares_segment.metadata = {'url': '/'} + list_queues_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/__init__.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/__init__.py new file mode 100644 index 0000000..5c1e619 --- /dev/null +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/__init__.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AccessPolicy + from ._models_py3 import CorsRule + from ._models_py3 import DequeuedMessageItem + from ._models_py3 import EnqueuedMessage + from ._models_py3 import GeoReplication + from ._models_py3 import ListQueuesSegmentResponse + from ._models_py3 import Logging + from ._models_py3 import Metrics + from ._models_py3 import PeekedMessageItem + from ._models_py3 import QueueItem + from ._models_py3 import QueueMessage + from ._models_py3 import RetentionPolicy + from ._models_py3 import SignedIdentifier + from ._models_py3 import StorageError, StorageErrorException + from ._models_py3 import StorageServiceProperties + from ._models_py3 import StorageServiceStats +except (SyntaxError, ImportError): + from ._models import AccessPolicy + from ._models import CorsRule + from ._models import DequeuedMessageItem + from ._models import EnqueuedMessage + from ._models import GeoReplication + from ._models import ListQueuesSegmentResponse + from ._models import Logging + from ._models import Metrics + from ._models import PeekedMessageItem + from ._models import QueueItem + from ._models import QueueMessage + from ._models import RetentionPolicy + from ._models import SignedIdentifier + from ._models import StorageError, StorageErrorException + from ._models import StorageServiceProperties + from ._models import StorageServiceStats +from ._azure_queue_storage_enums import ( + GeoReplicationStatusType, + ListQueuesIncludeType, + StorageErrorCode, +) + +__all__ = [ + 'AccessPolicy', + 'CorsRule', + 'DequeuedMessageItem', + 'EnqueuedMessage', + 'GeoReplication', + 'ListQueuesSegmentResponse', + 'Logging', + 'Metrics', + 'PeekedMessageItem', + 'QueueItem', + 'QueueMessage', + 'RetentionPolicy', + 'SignedIdentifier', + 'StorageError', 'StorageErrorException', + 'StorageServiceProperties', + 'StorageServiceStats', + 'StorageErrorCode', + 'GeoReplicationStatusType', + 'ListQueuesIncludeType', +] diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_azure_queue_storage_enums.py similarity index 70% rename from azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_azure_file_storage_enums.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_azure_queue_storage_enums.py index d6f14d9..cf6e7d1 100644 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_generated/models/_azure_file_storage_enums.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_azure_queue_storage_enums.py @@ -56,24 +56,15 @@ class StorageErrorCode(str, Enum): 
unsupported_xml_node = "UnsupportedXmlNode" unsupported_query_parameter = "UnsupportedQueryParameter" unsupported_http_verb = "UnsupportedHttpVerb" - cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" - client_cache_flush_delay = "ClientCacheFlushDelay" - delete_pending = "DeletePending" - directory_not_empty = "DirectoryNotEmpty" - file_lock_conflict = "FileLockConflict" - invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" - parent_not_found = "ParentNotFound" - read_only_attribute = "ReadOnlyAttribute" - share_already_exists = "ShareAlreadyExists" - share_being_deleted = "ShareBeingDeleted" - share_disabled = "ShareDisabled" - share_not_found = "ShareNotFound" - sharing_violation = "SharingViolation" - share_snapshot_in_progress = "ShareSnapshotInProgress" - share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" - share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" - share_has_snapshots = "ShareHasSnapshots" - container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" authorization_source_ip_mismatch = "AuthorizationSourceIPMismatch" authorization_protocol_mismatch = "AuthorizationProtocolMismatch" authorization_permission_mismatch = "AuthorizationPermissionMismatch" @@ -82,26 +73,13 @@ class StorageErrorCode(str, Enum): feature_version_mismatch = "FeatureVersionMismatch" -class DeleteSnapshotsOptionType(str, Enum): +class GeoReplicationStatusType(str, Enum): - include = "include" + live = "live" + bootstrap = "bootstrap" + unavailable = "unavailable" -class ListSharesIncludeType(str, Enum): +class ListQueuesIncludeType(str, Enum): - snapshots = "snapshots" metadata = "metadata" - - -class CopyStatusType(str, Enum): - - pending = "pending" - success = "success" - aborted = "aborted" - failed = "failed" - - -class FileRangeWriteType(str, Enum): - - update = "update" - clear = "clear" diff --git a/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models.py new file mode 100644 index 0000000..c50e93a --- /dev/null +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models.py @@ -0,0 +1,631 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AccessPolicy(Model): + """An Access policy. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. the date-time the policy is active + :type start: str + :param expiry: Required. the date-time the policy expires + :type expiry: str + :param permission: Required. 
the permissions for the acl policy + :type permission: str + """ + + _validation = { + 'start': {'required': True}, + 'expiry': {'required': True}, + 'permission': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, + 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, + 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(AccessPolicy, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.expiry = kwargs.get('expiry', None) + self.permission = kwargs.get('permission', None) + + +class CorsRule(Model): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. + + All required parameters must be populated in order to send to Azure. + + :param allowed_origins: Required. The origin domains that are permitted to + make a request against the storage service via CORS. The origin domain is + the domain from which the request originates. Note that the origin must be + an exact case-sensitive match with the origin that the user age sends to + the service. You can also use the wildcard character '*' to allow all + origin domains to make requests via CORS. + :type allowed_origins: str + :param allowed_methods: Required. The methods (HTTP request verbs) that + the origin domain may use for a CORS request. (comma separated) + :type allowed_methods: str + :param allowed_headers: Required. the request headers that the origin + domain may specify on the CORS request. + :type allowed_headers: str + :param exposed_headers: Required. The response headers that may be sent in + the response to the CORS request and exposed by the browser to the request + issuer + :type exposed_headers: str + :param max_age_in_seconds: Required. The maximum amount time that a + browser should cache the preflight OPTIONS request. 
+ :type max_age_in_seconds: int + """ + + _validation = { + 'allowed_origins': {'required': True}, + 'allowed_methods': {'required': True}, + 'allowed_headers': {'required': True}, + 'exposed_headers': {'required': True}, + 'max_age_in_seconds': {'required': True, 'minimum': 0}, + } + + _attribute_map = { + 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, + 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, + 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, + 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, + 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(CorsRule, self).__init__(**kwargs) + self.allowed_origins = kwargs.get('allowed_origins', None) + self.allowed_methods = kwargs.get('allowed_methods', None) + self.allowed_headers = kwargs.get('allowed_headers', None) + self.exposed_headers = kwargs.get('exposed_headers', None) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) + + +class DequeuedMessageItem(Model): + """The object returned in the QueueMessageList array when calling Get Messages + on a Queue. + + All required parameters must be populated in order to send to Azure. + + :param message_id: Required. The Id of the Message. + :type message_id: str + :param insertion_time: Required. The time the Message was inserted into + the Queue. + :type insertion_time: datetime + :param expiration_time: Required. The time that the Message will expire + and be automatically deleted. + :type expiration_time: datetime + :param pop_receipt: Required. This value is required to delete the + Message. If deletion fails using this popreceipt then the message has been + dequeued by another client. + :type pop_receipt: str + :param time_next_visible: Required. The time that the message will again + become visible in the Queue. + :type time_next_visible: datetime + :param dequeue_count: Required. The number of times the message has been + dequeued. + :type dequeue_count: long + :param message_text: Required. The content of the Message. 
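``CorsRule`` above is a plain msrest ``Model``: all five fields are declared required in ``_validation`` and map to ``AllowedOrigins``/``AllowedMethods``/``AllowedHeaders``/``ExposedHeaders``/``MaxAgeInSeconds`` XML elements through the ``_attribute_map``. A small construction sketch; attaching the rule to the service properties is left out, since the ``StorageServiceProperties`` fields are not shown in this hunk::

    from azure.multiapi.storagev2.queue.v2019_07_07._generated.models import CorsRule

    # All five fields are declared required in the model's _validation above.
    rule = CorsRule(
        allowed_origins="https://contoso.example",
        allowed_methods="GET,PUT",
        allowed_headers="x-ms-meta-*",
        exposed_headers="x-ms-request-id",
        max_age_in_seconds=3600,
    )
    print(rule.serialize())  # msrest Model helper; emits the _attribute_map keys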
+ :type message_text: str + """ + + _validation = { + 'message_id': {'required': True}, + 'insertion_time': {'required': True}, + 'expiration_time': {'required': True}, + 'pop_receipt': {'required': True}, + 'time_next_visible': {'required': True}, + 'dequeue_count': {'required': True}, + 'message_text': {'required': True}, + } + + _attribute_map = { + 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, + 'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}}, + 'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}}, + 'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}}, + 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, + } + _xml_map = { + 'name': 'QueueMessage' + } + + def __init__(self, **kwargs): + super(DequeuedMessageItem, self).__init__(**kwargs) + self.message_id = kwargs.get('message_id', None) + self.insertion_time = kwargs.get('insertion_time', None) + self.expiration_time = kwargs.get('expiration_time', None) + self.pop_receipt = kwargs.get('pop_receipt', None) + self.time_next_visible = kwargs.get('time_next_visible', None) + self.dequeue_count = kwargs.get('dequeue_count', None) + self.message_text = kwargs.get('message_text', None) + + +class EnqueuedMessage(Model): + """The object returned in the QueueMessageList array when calling Put Message + on a Queue. + + All required parameters must be populated in order to send to Azure. + + :param message_id: Required. The Id of the Message. + :type message_id: str + :param insertion_time: Required. The time the Message was inserted into + the Queue. + :type insertion_time: datetime + :param expiration_time: Required. The time that the Message will expire + and be automatically deleted. + :type expiration_time: datetime + :param pop_receipt: Required. This value is required to delete the + Message. If deletion fails using this popreceipt then the message has been + dequeued by another client. + :type pop_receipt: str + :param time_next_visible: Required. The time that the message will again + become visible in the Queue. 
+ :type time_next_visible: datetime + """ + + _validation = { + 'message_id': {'required': True}, + 'insertion_time': {'required': True}, + 'expiration_time': {'required': True}, + 'pop_receipt': {'required': True}, + 'time_next_visible': {'required': True}, + } + + _attribute_map = { + 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, + 'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}}, + 'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}}, + } + _xml_map = { + 'name': 'QueueMessage' + } + + def __init__(self, **kwargs): + super(EnqueuedMessage, self).__init__(**kwargs) + self.message_id = kwargs.get('message_id', None) + self.insertion_time = kwargs.get('insertion_time', None) + self.expiration_time = kwargs.get('expiration_time', None) + self.pop_receipt = kwargs.get('pop_receipt', None) + self.time_next_visible = kwargs.get('time_next_visible', None) + + +class GeoReplication(Model): + """GeoReplication. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the secondary location. Possible + values include: 'live', 'bootstrap', 'unavailable' + :type status: str or ~azure.storage.queue.models.GeoReplicationStatusType + :param last_sync_time: Required. A GMT date/time value, to the second. All + primary writes preceding this value are guaranteed to be available for + read operations at the secondary. Primary writes after this point in time + may or may not be available for reads. + :type last_sync_time: datetime + """ + + _validation = { + 'status': {'required': True}, + 'last_sync_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(GeoReplication, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.last_sync_time = kwargs.get('last_sync_time', None) + + +class ListQueuesSegmentResponse(Model): + """The object returned when calling List Queues on a Queue Service. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: Required. + :type prefix: str + :param marker: + :type marker: str + :param max_results: Required. + :type max_results: int + :param queue_items: + :type queue_items: list[~azure.storage.queue.models.QueueItem] + :param next_marker: Required. 
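# --- Editor's illustrative sketch (not part of the generated patch) ----------
# How the NextMarker field of ListQueuesSegmentResponse typically drives
# paging. `service_ops` is assumed to be an already-constructed generated
# ServiceOperations instance exposing a `list_queues_segment` method; that
# operation group is not shown in this hunk.
marker = None
while True:
    segment = service_ops.list_queues_segment(marker=marker, maxresults=100)
    for queue in segment.queue_items or []:   # queue_items may be None
        print(queue.name)
    marker = segment.next_marker
    if not marker:                            # empty NextMarker: listing done
        break
# ------------------------------------------------------------------------------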
+ :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'prefix': {'required': True}, + 'max_results': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'queue_items': {'key': 'QueueItems', 'type': '[QueueItem]', 'xml': {'name': 'Queues', 'itemsName': 'Queues', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(ListQueuesSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.queue_items = kwargs.get('queue_items', None) + self.next_marker = kwargs.get('next_marker', None) + + +class Logging(Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param delete: Required. Indicates whether all delete requests should be + logged. + :type delete: bool + :param read: Required. Indicates whether all read requests should be + logged. + :type read: bool + :param write: Required. Indicates whether all write requests should be + logged. + :type write: bool + :param retention_policy: Required. + :type retention_policy: ~azure.storage.queue.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'delete': {'required': True}, + 'read': {'required': True}, + 'write': {'required': True}, + 'retention_policy': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, + 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, + 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(Logging, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.delete = kwargs.get('delete', None) + self.read = kwargs.get('read', None) + self.write = kwargs.get('write', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class Metrics(Model): + """Metrics. + + All required parameters must be populated in order to send to Azure. + + :param version: The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the + Queue service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary + statistics for called API operations. 
+ :type include_apis: bool + :param retention_policy: + :type retention_policy: ~azure.storage.queue.models.RetentionPolicy + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(Metrics, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.enabled = kwargs.get('enabled', None) + self.include_apis = kwargs.get('include_apis', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class PeekedMessageItem(Model): + """The object returned in the QueueMessageList array when calling Peek + Messages on a Queue. + + All required parameters must be populated in order to send to Azure. + + :param message_id: Required. The Id of the Message. + :type message_id: str + :param insertion_time: Required. The time the Message was inserted into + the Queue. + :type insertion_time: datetime + :param expiration_time: Required. The time that the Message will expire + and be automatically deleted. + :type expiration_time: datetime + :param dequeue_count: Required. The number of times the message has been + dequeued. + :type dequeue_count: long + :param message_text: Required. The content of the Message. + :type message_text: str + """ + + _validation = { + 'message_id': {'required': True}, + 'insertion_time': {'required': True}, + 'expiration_time': {'required': True}, + 'dequeue_count': {'required': True}, + 'message_text': {'required': True}, + } + + _attribute_map = { + 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, + 'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}}, + 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, + } + _xml_map = { + 'name': 'QueueMessage' + } + + def __init__(self, **kwargs): + super(PeekedMessageItem, self).__init__(**kwargs) + self.message_id = kwargs.get('message_id', None) + self.insertion_time = kwargs.get('insertion_time', None) + self.expiration_time = kwargs.get('expiration_time', None) + self.dequeue_count = kwargs.get('dequeue_count', None) + self.message_text = kwargs.get('message_text', None) + + +class QueueItem(Model): + """An Azure Storage Queue. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the Queue. + :type name: str + :param metadata: + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + } + _xml_map = { + 'name': 'Queue' + } + + def __init__(self, **kwargs): + super(QueueItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.metadata = kwargs.get('metadata', None) + + +class QueueMessage(Model): + """A Message object which can be stored in a Queue. 
+ + All required parameters must be populated in order to send to Azure. + + :param message_text: Required. The content of the message + :type message_text: str + """ + + _validation = { + 'message_text': {'required': True}, + } + + _attribute_map = { + 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(QueueMessage, self).__init__(**kwargs) + self.message_text = kwargs.get('message_text', None) + + +class RetentionPolicy(Model): + """the retention policy. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether a retention policy is enabled + for the storage service + :type enabled: bool + :param days: Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted + :type days: int + """ + + _validation = { + 'enabled': {'required': True}, + 'days': {'minimum': 1}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(RetentionPolicy, self).__init__(**kwargs) + self.enabled = kwargs.get('enabled', None) + self.days = kwargs.get('days', None) + + +class SignedIdentifier(Model): + """signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. a unique id + :type id: str + :param access_policy: The access policy + :type access_policy: ~azure.storage.queue.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(SignedIdentifier, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.access_policy = kwargs.get('access_policy', None) + + +class StorageError(Model): + """StorageError. + + :param message: + :type message: str + """ + + _attribute_map = { + 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(StorageError, self).__init__(**kwargs) + self.message = kwargs.get('message', None) + + +class StorageErrorException(HttpResponseError): + """Server responsed with exception of type: 'StorageError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, response, deserialize, *args): + + model_name = 'StorageError' + self.error = deserialize(model_name, response) + if self.error is None: + self.error = deserialize.dependencies[model_name]() + super(StorageErrorException, self).__init__(response=response) + + +class StorageServiceProperties(Model): + """Storage Service Properties. + + :param logging: Azure Analytics Logging settings + :type logging: ~azure.storage.queue.models.Logging + :param hour_metrics: A summary of request statistics grouped by API in + hourly aggregates for queues + :type hour_metrics: ~azure.storage.queue.models.Metrics + :param minute_metrics: a summary of request statistics grouped by API in + minute aggregates for queues + :type minute_metrics: ~azure.storage.queue.models.Metrics + :param cors: The set of CORS rules. 
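# --- Editor's illustrative sketch (not part of the generated patch) ----------
# Composing the analytics settings defined in this module into a
# StorageServiceProperties payload, kwargs style. The resulting object would
# then be handed to the service-level set_properties operation (not shown in
# this hunk); all values below are examples.
from azure.multiapi.storagev2.queue.v2019_07_07._generated.models import (
    CorsRule, Logging, Metrics, RetentionPolicy, StorageServiceProperties)

retention = RetentionPolicy(enabled=True, days=7)
props = StorageServiceProperties(
    logging=Logging(version='1.0', delete=True, read=False, write=True,
                    retention_policy=retention),
    hour_metrics=Metrics(version='1.0', enabled=True, include_apis=True,
                         retention_policy=retention),
    minute_metrics=Metrics(enabled=False),
    cors=[CorsRule(allowed_origins='*', allowed_methods='GET',
                   allowed_headers='', exposed_headers='',
                   max_age_in_seconds=60)],
)
# ------------------------------------------------------------------------------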
+ :type cors: list[~azure.storage.queue.models.CorsRule] + """ + + _attribute_map = { + 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(StorageServiceProperties, self).__init__(**kwargs) + self.logging = kwargs.get('logging', None) + self.hour_metrics = kwargs.get('hour_metrics', None) + self.minute_metrics = kwargs.get('minute_metrics', None) + self.cors = kwargs.get('cors', None) + + +class StorageServiceStats(Model): + """Stats for the storage service. + + :param geo_replication: Geo-Replication information for the Secondary + Storage Service + :type geo_replication: ~azure.storage.queue.models.GeoReplication + """ + + _attribute_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(StorageServiceStats, self).__init__(**kwargs) + self.geo_replication = kwargs.get('geo_replication', None) diff --git a/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models_py3.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models_py3.py new file mode 100644 index 0000000..e74ab62 --- /dev/null +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/models/_models_py3.py @@ -0,0 +1,631 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from azure.core.exceptions import HttpResponseError + + +class AccessPolicy(Model): + """An Access policy. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. the date-time the policy is active + :type start: str + :param expiry: Required. the date-time the policy expires + :type expiry: str + :param permission: Required. the permissions for the acl policy + :type permission: str + """ + + _validation = { + 'start': {'required': True}, + 'expiry': {'required': True}, + 'permission': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, + 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, + 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, + } + _xml_map = { + } + + def __init__(self, *, start: str, expiry: str, permission: str, **kwargs) -> None: + super(AccessPolicy, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + self.permission = permission + + +class CorsRule(Model): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. 
Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. + + All required parameters must be populated in order to send to Azure. + + :param allowed_origins: Required. The origin domains that are permitted to + make a request against the storage service via CORS. The origin domain is + the domain from which the request originates. Note that the origin must be + an exact case-sensitive match with the origin that the user age sends to + the service. You can also use the wildcard character '*' to allow all + origin domains to make requests via CORS. + :type allowed_origins: str + :param allowed_methods: Required. The methods (HTTP request verbs) that + the origin domain may use for a CORS request. (comma separated) + :type allowed_methods: str + :param allowed_headers: Required. the request headers that the origin + domain may specify on the CORS request. + :type allowed_headers: str + :param exposed_headers: Required. The response headers that may be sent in + the response to the CORS request and exposed by the browser to the request + issuer + :type exposed_headers: str + :param max_age_in_seconds: Required. The maximum amount time that a + browser should cache the preflight OPTIONS request. + :type max_age_in_seconds: int + """ + + _validation = { + 'allowed_origins': {'required': True}, + 'allowed_methods': {'required': True}, + 'allowed_headers': {'required': True}, + 'exposed_headers': {'required': True}, + 'max_age_in_seconds': {'required': True, 'minimum': 0}, + } + + _attribute_map = { + 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, + 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, + 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, + 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, + 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, + } + _xml_map = { + } + + def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: + super(CorsRule, self).__init__(**kwargs) + self.allowed_origins = allowed_origins + self.allowed_methods = allowed_methods + self.allowed_headers = allowed_headers + self.exposed_headers = exposed_headers + self.max_age_in_seconds = max_age_in_seconds + + +class DequeuedMessageItem(Model): + """The object returned in the QueueMessageList array when calling Get Messages + on a Queue. + + All required parameters must be populated in order to send to Azure. + + :param message_id: Required. The Id of the Message. + :type message_id: str + :param insertion_time: Required. The time the Message was inserted into + the Queue. + :type insertion_time: datetime + :param expiration_time: Required. The time that the Message will expire + and be automatically deleted. + :type expiration_time: datetime + :param pop_receipt: Required. This value is required to delete the + Message. If deletion fails using this popreceipt then the message has been + dequeued by another client. + :type pop_receipt: str + :param time_next_visible: Required. The time that the message will again + become visible in the Queue. 
+ :type time_next_visible: datetime + :param dequeue_count: Required. The number of times the message has been + dequeued. + :type dequeue_count: long + :param message_text: Required. The content of the Message. + :type message_text: str + """ + + _validation = { + 'message_id': {'required': True}, + 'insertion_time': {'required': True}, + 'expiration_time': {'required': True}, + 'pop_receipt': {'required': True}, + 'time_next_visible': {'required': True}, + 'dequeue_count': {'required': True}, + 'message_text': {'required': True}, + } + + _attribute_map = { + 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, + 'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}}, + 'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}}, + 'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}}, + 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, + } + _xml_map = { + 'name': 'QueueMessage' + } + + def __init__(self, *, message_id: str, insertion_time, expiration_time, pop_receipt: str, time_next_visible, dequeue_count: int, message_text: str, **kwargs) -> None: + super(DequeuedMessageItem, self).__init__(**kwargs) + self.message_id = message_id + self.insertion_time = insertion_time + self.expiration_time = expiration_time + self.pop_receipt = pop_receipt + self.time_next_visible = time_next_visible + self.dequeue_count = dequeue_count + self.message_text = message_text + + +class EnqueuedMessage(Model): + """The object returned in the QueueMessageList array when calling Put Message + on a Queue. + + All required parameters must be populated in order to send to Azure. + + :param message_id: Required. The Id of the Message. + :type message_id: str + :param insertion_time: Required. The time the Message was inserted into + the Queue. + :type insertion_time: datetime + :param expiration_time: Required. The time that the Message will expire + and be automatically deleted. + :type expiration_time: datetime + :param pop_receipt: Required. This value is required to delete the + Message. If deletion fails using this popreceipt then the message has been + dequeued by another client. + :type pop_receipt: str + :param time_next_visible: Required. The time that the message will again + become visible in the Queue. 
+ :type time_next_visible: datetime + """ + + _validation = { + 'message_id': {'required': True}, + 'insertion_time': {'required': True}, + 'expiration_time': {'required': True}, + 'pop_receipt': {'required': True}, + 'time_next_visible': {'required': True}, + } + + _attribute_map = { + 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, + 'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}}, + 'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}}, + } + _xml_map = { + 'name': 'QueueMessage' + } + + def __init__(self, *, message_id: str, insertion_time, expiration_time, pop_receipt: str, time_next_visible, **kwargs) -> None: + super(EnqueuedMessage, self).__init__(**kwargs) + self.message_id = message_id + self.insertion_time = insertion_time + self.expiration_time = expiration_time + self.pop_receipt = pop_receipt + self.time_next_visible = time_next_visible + + +class GeoReplication(Model): + """GeoReplication. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the secondary location. Possible + values include: 'live', 'bootstrap', 'unavailable' + :type status: str or ~azure.storage.queue.models.GeoReplicationStatusType + :param last_sync_time: Required. A GMT date/time value, to the second. All + primary writes preceding this value are guaranteed to be available for + read operations at the secondary. Primary writes after this point in time + may or may not be available for reads. + :type last_sync_time: datetime + """ + + _validation = { + 'status': {'required': True}, + 'last_sync_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, + } + _xml_map = { + } + + def __init__(self, *, status, last_sync_time, **kwargs) -> None: + super(GeoReplication, self).__init__(**kwargs) + self.status = status + self.last_sync_time = last_sync_time + + +class ListQueuesSegmentResponse(Model): + """The object returned when calling List Queues on a Queue Service. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: Required. + :type prefix: str + :param marker: + :type marker: str + :param max_results: Required. + :type max_results: int + :param queue_items: + :type queue_items: list[~azure.storage.queue.models.QueueItem] + :param next_marker: Required. 
+ :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'prefix': {'required': True}, + 'max_results': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, + 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, + 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, + 'queue_items': {'key': 'QueueItems', 'type': '[QueueItem]', 'xml': {'name': 'Queues', 'itemsName': 'Queues', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, service_endpoint: str, prefix: str, max_results: int, next_marker: str, marker: str=None, queue_items=None, **kwargs) -> None: + super(ListQueuesSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.queue_items = queue_items + self.next_marker = next_marker + + +class Logging(Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param delete: Required. Indicates whether all delete requests should be + logged. + :type delete: bool + :param read: Required. Indicates whether all read requests should be + logged. + :type read: bool + :param write: Required. Indicates whether all write requests should be + logged. + :type write: bool + :param retention_policy: Required. + :type retention_policy: ~azure.storage.queue.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'delete': {'required': True}, + 'read': {'required': True}, + 'write': {'required': True}, + 'retention_policy': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, + 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, + 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None: + super(Logging, self).__init__(**kwargs) + self.version = version + self.delete = delete + self.read = read + self.write = write + self.retention_policy = retention_policy + + +class Metrics(Model): + """Metrics. + + All required parameters must be populated in order to send to Azure. + + :param version: The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the + Queue service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary + statistics for called API operations. 
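# --- Editor's illustrative sketch (not part of the generated patch) ----------
# Unlike the **kwargs constructors in the module above, these _models_py3.py
# constructors take keyword-only, typed parameters, so required fields are
# enforced at construction time rather than later during serialization.
from azure.multiapi.storagev2.queue.v2019_07_07._generated.models._models_py3 import (
    Logging, RetentionPolicy)

logging_settings = Logging(
    version='1.0', delete=True, read=True, write=True,
    retention_policy=RetentionPolicy(enabled=True, days=7),
)
# Omitting a required keyword such as `write` raises TypeError immediately.
# ------------------------------------------------------------------------------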
+ :type include_apis: bool + :param retention_policy: + :type retention_policy: ~azure.storage.queue.models.RetentionPolicy + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, + } + _xml_map = { + } + + def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None: + super(Metrics, self).__init__(**kwargs) + self.version = version + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy + + +class PeekedMessageItem(Model): + """The object returned in the QueueMessageList array when calling Peek + Messages on a Queue. + + All required parameters must be populated in order to send to Azure. + + :param message_id: Required. The Id of the Message. + :type message_id: str + :param insertion_time: Required. The time the Message was inserted into + the Queue. + :type insertion_time: datetime + :param expiration_time: Required. The time that the Message will expire + and be automatically deleted. + :type expiration_time: datetime + :param dequeue_count: Required. The number of times the message has been + dequeued. + :type dequeue_count: long + :param message_text: Required. The content of the Message. + :type message_text: str + """ + + _validation = { + 'message_id': {'required': True}, + 'insertion_time': {'required': True}, + 'expiration_time': {'required': True}, + 'dequeue_count': {'required': True}, + 'message_text': {'required': True}, + } + + _attribute_map = { + 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, + 'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}}, + 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, + } + _xml_map = { + 'name': 'QueueMessage' + } + + def __init__(self, *, message_id: str, insertion_time, expiration_time, dequeue_count: int, message_text: str, **kwargs) -> None: + super(PeekedMessageItem, self).__init__(**kwargs) + self.message_id = message_id + self.insertion_time = insertion_time + self.expiration_time = expiration_time + self.dequeue_count = dequeue_count + self.message_text = message_text + + +class QueueItem(Model): + """An Azure Storage Queue. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the Queue. + :type name: str + :param metadata: + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + } + _xml_map = { + 'name': 'Queue' + } + + def __init__(self, *, name: str, metadata=None, **kwargs) -> None: + super(QueueItem, self).__init__(**kwargs) + self.name = name + self.metadata = metadata + + +class QueueMessage(Model): + """A Message object which can be stored in a Queue. 
+ + All required parameters must be populated in order to send to Azure. + + :param message_text: Required. The content of the message + :type message_text: str + """ + + _validation = { + 'message_text': {'required': True}, + } + + _attribute_map = { + 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, + } + _xml_map = { + } + + def __init__(self, *, message_text: str, **kwargs) -> None: + super(QueueMessage, self).__init__(**kwargs) + self.message_text = message_text + + +class RetentionPolicy(Model): + """the retention policy. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether a retention policy is enabled + for the storage service + :type enabled: bool + :param days: Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted + :type days: int + """ + + _validation = { + 'enabled': {'required': True}, + 'days': {'minimum': 1}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, + 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, + } + _xml_map = { + } + + def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: + super(RetentionPolicy, self).__init__(**kwargs) + self.enabled = enabled + self.days = days + + +class SignedIdentifier(Model): + """signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. a unique id + :type id: str + :param access_policy: The access policy + :type access_policy: ~azure.storage.queue.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, + } + _xml_map = { + } + + def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: + super(SignedIdentifier, self).__init__(**kwargs) + self.id = id + self.access_policy = access_policy + + +class StorageError(Model): + """StorageError. + + :param message: + :type message: str + """ + + _attribute_map = { + 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, + } + _xml_map = { + } + + def __init__(self, *, message: str=None, **kwargs) -> None: + super(StorageError, self).__init__(**kwargs) + self.message = message + + +class StorageErrorException(HttpResponseError): + """Server responsed with exception of type: 'StorageError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, response, deserialize, *args): + + model_name = 'StorageError' + self.error = deserialize(model_name, response) + if self.error is None: + self.error = deserialize.dependencies[model_name]() + super(StorageErrorException, self).__init__(response=response) + + +class StorageServiceProperties(Model): + """Storage Service Properties. + + :param logging: Azure Analytics Logging settings + :type logging: ~azure.storage.queue.models.Logging + :param hour_metrics: A summary of request statistics grouped by API in + hourly aggregates for queues + :type hour_metrics: ~azure.storage.queue.models.Metrics + :param minute_metrics: a summary of request statistics grouped by API in + minute aggregates for queues + :type minute_metrics: ~azure.storage.queue.models.Metrics + :param cors: The set of CORS rules. 
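# --- Editor's illustrative sketch (not part of the generated patch) ----------
# Every generated operation raises StorageErrorException (a subclass of
# azure.core.exceptions.HttpResponseError) on unexpected status codes, with
# the deserialized StorageError body attached as `.error` when present.
# `queue_ops` is assumed to be an already-constructed queue operations group.
from azure.multiapi.storagev2.queue.v2019_07_07._generated.models import StorageErrorException

try:
    queue_ops.get_properties(timeout=30)
except StorageErrorException as exc:
    print('request failed:', getattr(exc.error, 'message', None) or exc)
# ------------------------------------------------------------------------------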
+ :type cors: list[~azure.storage.queue.models.CorsRule] + """ + + _attribute_map = { + 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, + } + _xml_map = { + } + + def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, **kwargs) -> None: + super(StorageServiceProperties, self).__init__(**kwargs) + self.logging = logging + self.hour_metrics = hour_metrics + self.minute_metrics = minute_metrics + self.cors = cors + + +class StorageServiceStats(Model): + """Stats for the storage service. + + :param geo_replication: Geo-Replication information for the Secondary + Storage Service + :type geo_replication: ~azure.storage.queue.models.GeoReplication + """ + + _attribute_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, + } + _xml_map = { + } + + def __init__(self, *, geo_replication=None, **kwargs) -> None: + super(StorageServiceStats, self).__init__(**kwargs) + self.geo_replication = geo_replication diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/__init__.py similarity index 70% rename from azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/__init__.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/__init__.py index 65680c9..d600f52 100644 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/__init__.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/__init__.py @@ -10,13 +10,13 @@ # -------------------------------------------------------------------------- from ._service_operations import ServiceOperations -from ._share_operations import ShareOperations -from ._directory_operations import DirectoryOperations -from ._file_operations import FileOperations +from ._queue_operations import QueueOperations +from ._messages_operations import MessagesOperations +from ._message_id_operations import MessageIdOperations __all__ = [ 'ServiceOperations', - 'ShareOperations', - 'DirectoryOperations', - 'FileOperations', + 'QueueOperations', + 'MessagesOperations', + 'MessageIdOperations', ] diff --git a/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_message_id_operations.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_message_id_operations.py new file mode 100644 index 0000000..8be7a24 --- /dev/null +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_message_id_operations.py @@ -0,0 +1,184 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import map_error + +from .. import models + + +class MessageIdOperations(object): + """MessageIdOperations operations. 
+
+    You should not instantiate this class directly, but create a Client
+    instance that will create it for you and attach it as an attribute.
+
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+
+        self._config = config
+
+    def update(self, pop_receipt, visibilitytimeout, queue_message=None, timeout=None, request_id=None, cls=None, **kwargs):
+        """The Update operation was introduced with version 2011-08-18 of the
+        Queue service API. The Update Message operation updates the visibility
+        timeout of a message. You can also use this operation to update the
+        contents of a message. A message must be in a format that can be
+        included in an XML request with UTF-8 encoding, and the encoded message
+        can be up to 64KB in size.
+
+        :param pop_receipt: Required. Specifies the valid pop receipt value
+         returned from an earlier call to the Get Messages or Update Message
+         operation.
+        :type pop_receipt: str
+        :param visibilitytimeout: Optional. Specifies the new visibility
+         timeout value, in seconds, relative to server time. The default value
+         is 30 seconds. A specified value must be larger than or equal to 1
+         second, and cannot be larger than 7 days, or larger than 2 hours on
+         REST protocol versions prior to version 2011-08-18. The visibility
+         timeout of a message can be set to a value later than the expiry time.
+        :type visibilitytimeout: int
+        :param queue_message: A Message object which can be stored in a Queue
+        :type queue_message: ~azure.storage.queue.models.QueueMessage
+        :param timeout: The timeout parameter is expressed in seconds. For
+         more information, see <a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting
+         Timeouts for Queue Service Operations.</a>
+        :type timeout: int
+        :param metadata: Optional. Include this parameter to specify that the
+         queue's metadata be returned as part of the response body. Note that
+         metadata requested with this parameter must be stored in accordance
+         with the naming restrictions imposed by the 2009-09-19 version of the
+         Queue service. Beginning with this version, all metadata names must
+         adhere to the naming conventions for C# identifiers.
+        :type metadata: str
+        :param request_id: Provides a client-generated, opaque value with a 1
+         KB character limit that is recorded in the analytics logs when storage
+         analytics logging is enabled.
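# --- Editor's illustrative sketch (not part of the generated patch) ----------
# Updating a message through the generated MessageIdOperations shown above.
# `message_id_ops` is assumed to be the operation group attached to the
# generated client, and `msg` a message previously returned by a dequeue call
# (it carries the pop receipt that proves ownership of the message).
from azure.multiapi.storagev2.queue.v2019_07_07._generated.models import QueueMessage

message_id_ops.update(
    pop_receipt=msg.pop_receipt,
    visibilitytimeout=60,                                    # hide for 60 more seconds
    queue_message=QueueMessage(message_text='retry later'),  # optional new content
)
# ------------------------------------------------------------------------------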
@@ -57,14 +59,11 @@ def set_properties(self, storage_service_properties, timeout=None, request_id=No :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - # Construct URL - url = self.set_properties.metadata['url'] + url = self.create.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -74,47 +73,41 @@ def set_properties(self, storage_service_properties, timeout=None, request_id=No query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) + request = self._client.put(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response - if response.status_code not in [202]: + if response.status_code not in [201, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) if cls: response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} + create.metadata = {'url': '/{queueName}'} - def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): - """gets the properties of a storage account's Blob service, including - properties for Storage Analytics and CORS (Cross-Origin Resource - Sharing) rules. + def delete(self, timeout=None, request_id=None, cls=None, **kwargs): + """operation permanently deletes the specified queue. - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. 
:type timeout: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -122,17 +115,14 @@ def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceProperties + :return: None or the result of cls(response) + :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "properties" - # Construct URL - url = self.get_properties.metadata['url'] + url = self.delete.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -142,51 +132,40 @@ def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} - header_parameters['Accept'] = 'application/xml' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) + request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceProperties', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + if cls: + response_headers = { 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } + return cls(response, None, response_headers) + delete.metadata = {'url': '/{queueName}'} - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - get_properties.metadata = {'url': '/'} - - def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs): - """Retrieves statistics related to replication for the Blob service. It is - only available on the secondary location endpoint when read-access - geo-redundant replication is enabled for the storage account. + def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): + """Retrieves user-defined metadata and queue properties on the specified + queue. Metadata is associated with the queue as name-values pairs. 
- :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -194,17 +173,16 @@ def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs): :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: StorageServiceStats or the result of cls(response) - :rtype: ~azure.storage.blob.models.StorageServiceStats + :return: None or the result of cls(response) + :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "stats" + comp = "metadata" # Construct URL - url = self.get_statistics.metadata['url'] + url = self.get_properties.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -214,12 +192,10 @@ def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs): query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} - header_parameters['Accept'] = 'application/xml' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') @@ -233,74 +209,50 @@ def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs): map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('StorageServiceStats', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + if cls: + response_headers = { + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-approximate-messages-count': self._deserialize('int', response.headers.get('x-ms-approximate-messages-count')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } + return cls(response, None, response_headers) + get_properties.metadata = {'url': '/{queueName}'} - if cls: - return cls(response, deserialized, header_dict) + def set_metadata(self, timeout=None, metadata=None, request_id=None, cls=None, **kwargs): + """sets user-defined metadata on the specified queue. Metadata is + associated with the queue as name-value pairs. 
- return deserialized - get_statistics.metadata = {'url': '/'} - - def list_containers_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, request_id=None, cls=None, **kwargs): - """The List Containers Segment operation returns a list of the containers - under the specified account. - - :param prefix: Filters the results to return only containers whose - name begins with the specified prefix. - :type prefix: str - :param marker: A string value that identifies the portion of the list - of containers to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all containers remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. - :type marker: str - :param maxresults: Specifies the maximum number of containers to - return. If the request does not specify maxresults, or specifies a - value greater than 5000, the server will return up to 5000 items. Note - that if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. - :type maxresults: int - :param include: Include this parameter to specify that the container's - metadata be returned as part of the response body. Possible values - include: 'metadata' - :type include: str or - ~azure.storage.blob.models.ListContainersIncludeType - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param metadata: Optional. Include this parameter to specify that the + queue's metadata be returned as part of the response body. Note that + metadata requested with this parameter must be stored in accordance + with the naming restrictions imposed by the 2009-09-19 version of the + Queue service. Beginning with this version, all metadata names must + adhere to the naming conventions for C# identifiers. + :type metadata: str :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
:type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: ListContainersSegmentResponse or the result of cls(response) - :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :return: None or the result of cls(response) + :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - comp = "list" + comp = "metadata" # Construct URL - url = self.list_containers_segment.metadata['url'] + url = self.set_metadata.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -308,61 +260,45 @@ def list_containers_segment(self, prefix=None, marker=None, maxresults=None, inc # Construct parameters query_parameters = {} - if prefix is not None: - query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') - if marker is not None: - query_parameters['marker'] = self._serialize.query("marker", marker, 'str') - if maxresults is not None: - query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) - if include is not None: - query_parameters['include'] = self._serialize.query("include", include, 'ListContainersIncludeType') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} - header_parameters['Accept'] = 'application/xml' + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) + request = self._client.put(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListContainersSegmentResponse', response) - header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + if cls: + response_headers = { 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } + return cls(response, None, response_headers) + set_metadata.metadata = {'url': '/{queueName}'} - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - list_containers_segment.metadata = {'url': '/'} - - def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=None, **kwargs): - """Retrieves a user delegation key for the Blob service. 
This is only a - valid operation when using bearer token authentication. + def get_access_policy(self, timeout=None, request_id=None, cls=None, **kwargs): + """returns details about any stored access policies specified on the queue + that may be used with Shared Access Signatures. - :param key_info: - :type key_info: ~azure.storage.blob.models.KeyInfo - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -370,17 +306,16 @@ def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=N :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: UserDelegationKey or the result of cls(response) - :rtype: ~azure.storage.blob.models.UserDelegationKey + :return: list or the result of cls(response) + :rtype: list[~azure.storage.queue.models.SignedIdentifier] :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - restype = "service" - comp = "userdelegationkey" + comp = "acl" # Construct URL - url = self.get_user_delegation_key.metadata['url'] + url = self.get_access_policy.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -390,22 +325,17 @@ def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=N query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - # Construct body - body_content = self._serialize.body(key_info, 'KeyInfo') - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) + request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response @@ -416,9 +346,8 @@ def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=N header_dict = {} deserialized = None if response.status_code == 200: - deserialized = self._deserialize('UserDelegationKey', response) + deserialized = self._deserialize('[SignedIdentifier]', response) header_dict = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 'Date': self._deserialize('rfc-1123', 
response.headers.get('Date')), @@ -429,76 +358,18 @@ def get_user_delegation_key(self, key_info, timeout=None, request_id=None, cls=N return cls(response, deserialized, header_dict) return deserialized - get_user_delegation_key.metadata = {'url': '/'} - - def get_account_info(self, cls=None, **kwargs): - """Returns the sku name and account kind . - - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) - :rtype: None - :raises: - :class:`StorageErrorException` - """ - error_map = kwargs.pop('error_map', None) - restype = "account" - comp = "properties" - - # Construct URL - url = self.get_account_info.metadata['url'] - path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['restype'] = self._serialize.query("restype", restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') - - # Construct headers - header_parameters = {} - header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response + get_access_policy.metadata = {'url': '/{queueName}'} - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) + def set_access_policy(self, queue_acl=None, timeout=None, request_id=None, cls=None, **kwargs): + """sets stored access policies for the queue that may be used with Shared + Access Signatures. - if cls: - response_headers = { - 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-sku-name': self._deserialize(models.SkuName, response.headers.get('x-ms-sku-name')), - 'x-ms-account-kind': self._deserialize(models.AccountKind, response.headers.get('x-ms-account-kind')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - get_account_info.metadata = {'url': '/'} - - def submit_batch(self, body, content_length, multipart_content_type, timeout=None, request_id=None, cls=None, **kwargs): - """The Batch operation allows multiple API calls to be embedded into a - single HTTP request. - - :param body: Initial data - :type body: Generator - :param content_length: The length of the request. - :type content_length: long - :param multipart_content_type: Required. The value of this header must - be multipart/mixed with a batch boundary. Example header value: - multipart/mixed; boundary=batch_ - :type multipart_content_type: str - :param timeout: The timeout parameter is expressed in seconds. For + :param queue_acl: the acls for the queue + :type queue_acl: list[~azure.storage.queue.models.SignedIdentifier] + :param timeout: The The timeout parameter is expressed in seconds. 
For more information, see Setting - Timeouts for Blob Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int :param request_id: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -506,16 +377,16 @@ def submit_batch(self, body, content_length, multipart_content_type, timeout=Non :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: object or the result of cls(response) - :rtype: Generator + :return: None or the result of cls(response) + :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) - comp = "batch" + comp = "acl" # Construct URL - url = self.submit_batch.metadata['url'] + url = self.set_access_policy.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -529,38 +400,33 @@ def submit_batch(self, body, content_length, multipart_content_type, timeout=Non # Construct headers header_parameters = {} - header_parameters['Accept'] = 'application/xml' header_parameters['Content-Type'] = 'application/xml; charset=utf-8' - header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') - header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct body + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'itemsName': 'SignedIdentifier', 'wrapped': True}} + if queue_acl is not None: + body_content = self._serialize.body(queue_acl, '[SignedIdentifier]', serialization_ctxt=serialization_ctxt) + else: + body_content = None # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, stream_content=body) - pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response - if response.status_code not in [200]: + if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = response.stream_download(self._client._pipeline) - header_dict = { - 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + if cls: + response_headers = { 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } - - if cls: - return cls(response, deserialized, header_dict) - - return deserialized - submit_batch.metadata = {'url': '/'} + return cls(response, None, response_headers) + 
set_access_policy.metadata = {'url': '/{queueName}'} diff --git a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_service_operations.py similarity index 53% rename from azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_service_operations.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_service_operations.py index cd43e83..eef4bc9 100644 --- a/azure/multiapi/storagev2/fileshare/v2019_12_12/_generated/operations/_service_operations.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/operations/_service_operations.py @@ -37,25 +37,29 @@ def __init__(self, client, config, serializer, deserializer): self._config = config self.restype = "service" - def set_properties(self, storage_service_properties, timeout=None, cls=None, **kwargs): - """Sets properties for a storage account's File service endpoint, - including properties for Storage Analytics metrics and CORS - (Cross-Origin Resource Sharing) rules. + def set_properties(self, storage_service_properties, timeout=None, request_id=None, cls=None, **kwargs): + """Sets properties for a storage account's Queue service endpoint, + including properties for Storage Analytics and CORS (Cross-Origin + Resource Sharing) rules. :param storage_service_properties: The StorageService properties. :type storage_service_properties: - ~azure.storage.fileshare.models.StorageServiceProperties - :param timeout: The timeout parameter is expressed in seconds. For + ~azure.storage.queue.models.StorageServiceProperties + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for File Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) comp = "properties" @@ -78,6 +82,8 @@ def set_properties(self, storage_service_properties, timeout=None, cls=None, **k header_parameters = {} header_parameters['Content-Type'] = 'application/xml; charset=utf-8' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct body body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') @@ -100,22 +106,26 @@ def set_properties(self, storage_service_properties, timeout=None, cls=None, **k return cls(response, None, response_headers) set_properties.metadata = {'url': '/'} - def get_properties(self, timeout=None, cls=None, **kwargs): - """Gets the properties of a storage account's File service, including - properties for Storage Analytics metrics and CORS (Cross-Origin - Resource Sharing) rules. 
+ def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): + """gets the properties of a storage account's Queue service, including + properties for Storage Analytics and CORS (Cross-Origin Resource + Sharing) rules. - :param timeout: The timeout parameter is expressed in seconds. For + :param timeout: The The timeout parameter is expressed in seconds. For more information, see Setting - Timeouts for File Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str :param callable cls: A custom type or function that will be passed the direct response :return: StorageServiceProperties or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :rtype: ~azure.storage.queue.models.StorageServiceProperties :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) comp = "properties" @@ -138,6 +148,8 @@ def get_properties(self, timeout=None, cls=None, **kwargs): header_parameters = {} header_parameters['Accept'] = 'application/xml' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) @@ -164,45 +176,126 @@ def get_properties(self, timeout=None, cls=None, **kwargs): return deserialized get_properties.metadata = {'url': '/'} - def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include=None, timeout=None, cls=None, **kwargs): - """The List Shares Segment operation returns a list of the shares and - share snapshots under the specified account. + def get_statistics(self, timeout=None, request_id=None, cls=None, **kwargs): + """Retrieves statistics related to replication for the Queue service. It + is only available on the secondary location endpoint when read-access + geo-redundant replication is enabled for the storage account. + + :param timeout: The The timeout parameter is expressed in seconds. For + more information, see Setting - Timeouts for File Service Operations. + href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations>Setting + Timeouts for Queue Service Operations. :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
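For context, the client-level wrappers for this pair of service operations are ``QueueServiceClient.set_service_properties`` and ``get_service_properties``, and the replication statistics call is surfaced as ``get_service_stats``; all three appear later in this patch. The sketch below is illustrative only: the ``Metrics``, ``RetentionPolicy`` and ``CorsRule`` constructors and the root-level exports are assumed to mirror ``azure.storage.queue``, and the account details are placeholders.

.. code-block:: python

    from azure.multiapi.storagev2.queue.v2019_07_07 import (
        CorsRule, Metrics, QueueServiceClient, RetentionPolicy)

    service = QueueServiceClient.from_connection_string("<connection-string>")

    # PUT /?restype=service&comp=properties: enable minute metrics and one CORS rule.
    service.set_service_properties(
        minute_metrics=Metrics(enabled=True, include_apis=True,
                               retention_policy=RetentionPolicy(enabled=True, days=5)),
        cors=[CorsRule(allowed_origins=["https://contoso.com"], allowed_methods=["GET"])],
    )

    # GET /?restype=service&comp=properties reads the same settings back.
    print(service.get_service_properties())

    # GET /?restype=service&comp=stats is answered by the secondary endpoint and
    # needs read-access geo-redundant replication (RA-GRS) enabled on the account.
    print(service.get_service_stats())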
+ :type request_id: str :param callable cls: A custom type or function that will be passed the direct response - :return: ListSharesResponse or the result of cls(response) - :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :return: ListQueuesSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.queue.models.ListQueuesSegmentResponse :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) comp = "list" # Construct URL - url = self.list_shares_segment.metadata['url'] + url = self.list_queues_segment.metadata['url'] path_format_arguments = { 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) } @@ -217,7 +310,7 @@ def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include if maxresults is not None: query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListSharesIncludeType]', div=',') + query_parameters['include'] = self._serialize.query("include", include, '[ListQueuesIncludeType]', div=',') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') @@ -226,6 +319,8 @@ def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include header_parameters = {} header_parameters['Accept'] = 'application/xml' header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) @@ -239,10 +334,11 @@ def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include header_dict = {} deserialized = None if response.status_code == 200: - deserialized = self._deserialize('ListSharesResponse', response) + deserialized = self._deserialize('ListQueuesSegmentResponse', response) header_dict = { 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } @@ -250,4 +346,4 @@ def list_shares_segment(self, prefix=None, marker=None, maxresults=None, include return cls(response, deserialized, header_dict) return deserialized - list_shares_segment.metadata = {'url': '/'} + list_queues_segment.metadata = {'url': '/'} diff --git a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/version.py b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/version.py similarity index 95% rename from azure/multiapi/storagev2/blob/v2020_02_10/_generated/version.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_generated/version.py index 6ef707d..f8a9f8f 100644 --- a/azure/multiapi/storagev2/blob/v2020_02_10/_generated/version.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_generated/version.py @@ -9,5 +9,5 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "2020-02-10" +VERSION = "2018-03-28" diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_message_encoding.py b/azure/multiapi/storagev2/queue/v2019_07_07/_message_encoding.py similarity index 94% rename from azure/multiapi/storagev2/queue/v2021_02_12/_message_encoding.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_message_encoding.py index 4e1b595..52b6439 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_message_encoding.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_message_encoding.py @@ -5,20 +5,19 @@ # -------------------------------------------------------------------------- # pylint: disable=unused-argument -import sys from base64 import b64encode, b64decode +import sys import six from azure.core.exceptions import DecodeError -from ._encryption import decrypt_queue_message, encrypt_queue_message, _ENCRYPTION_PROTOCOL_V1 +from ._shared.encryption import decrypt_queue_message, encrypt_queue_message class MessageEncodePolicy(object): def __init__(self): self.require_encryption = False - self.encryption_version = None self.key_encryption_key = None self.resolver = None @@ -26,12 +25,11 @@ def __call__(self, content): if content: content = self.encode(content) if self.key_encryption_key is not None: - content = encrypt_queue_message(content, self.key_encryption_key, self.encryption_version) + content = encrypt_queue_message(content, self.key_encryption_key) return content - def configure(self, require_encryption, key_encryption_key, resolver, encryption_version=_ENCRYPTION_PROTOCOL_V1): + def configure(self, require_encryption, key_encryption_key, resolver): self.require_encryption = require_encryption - self.encryption_version = encryption_version self.key_encryption_key = key_encryption_key self.resolver = resolver if self.require_encryption and not self.key_encryption_key: @@ -124,7 +122,6 @@ class BinaryBase64DecodePolicy(MessageDecodePolicy): """ def decode(self, content, response): - response = response.http_response try: return b64decode(content.encode('utf-8')) except (ValueError, TypeError) as error: diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_models.py b/azure/multiapi/storagev2/queue/v2019_07_07/_models.py similarity index 96% rename from azure/multiapi/storagev2/queue/v2021_02_12/_models.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_models.py index b289c90..8fc8a30 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_models.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_models.py @@ -7,10 +7,10 @@ # pylint: disable=super-init-not-called from typing import List # pylint: disable=unused-import -from azure.core.exceptions import HttpResponseError from azure.core.paging import PageIterator from ._shared.response_handlers import return_context_and_deserialized, process_storage_error from ._shared.models import DictMixin +from ._generated.models import StorageErrorException from ._generated.models import AccessPolicy as GenAccessPolicy from ._generated.models import Logging as GeneratedLogging from ._generated.models import Metrics as GeneratedMetrics @@ -264,10 +264,8 @@ class MessagesPaged(PageIterator): :param callable command: Function to retrieve the next page of items. :param int results_per_page: The maximum number of messages to retrieve per call. - :param int max_messages: The maximum number of messages to retrieve from - the queue. 
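Because this hunk removes the ``encryption_version`` plumbing from the message encode/decode policies, it may help to show how the policies are attached to a client at all; the ``message_encode_policy`` and ``message_decode_policy`` keywords appear in the ``QueueClient`` constructor later in this patch. A sketch, assuming the Base64 policy classes referenced in the docstrings (``TextBase64EncodePolicy``/``TextBase64DecodePolicy``) are importable from the package root as they are in ``azure.storage.queue``:

.. code-block:: python

    from azure.multiapi.storagev2.queue.v2019_07_07 import (
        QueueClient, TextBase64DecodePolicy, TextBase64EncodePolicy)

    # Outgoing message text is Base64-encoded and incoming text decoded again,
    # which keeps arbitrary payloads safe inside the XML body the service expects.
    queue = QueueClient.from_connection_string(
        "<connection-string>", queue_name="tasks",
        message_encode_policy=TextBase64EncodePolicy(),
        message_decode_policy=TextBase64DecodePolicy(),
    )
    queue.send_message("payload with <xml-hostile> & characters")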
""" - def __init__(self, command, results_per_page=None, continuation_token=None, max_messages=None): + def __init__(self, command, results_per_page=None, continuation_token=None): if continuation_token is not None: raise ValueError("This operation does not support continuation token") @@ -277,26 +275,17 @@ def __init__(self, command, results_per_page=None, continuation_token=None, max_ ) self._command = command self.results_per_page = results_per_page - self._max_messages = max_messages def _get_next_cb(self, continuation_token): try: - if self._max_messages is not None: - if self.results_per_page is None: - self.results_per_page = 1 - if self._max_messages < 1: - raise StopIteration("End of paging") - self.results_per_page = min(self.results_per_page, self._max_messages) return self._command(number_of_messages=self.results_per_page) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) def _extract_data_cb(self, messages): # pylint: disable=no-self-use # There is no concept of continuation token, so raising on my own condition if not messages: raise StopIteration("End of paging") - if self._max_messages is not None: - self._max_messages = self._max_messages - len(messages) return "TOKEN_IGNORED", [QueueMessage._from_generated(q) for q in messages] # pylint: disable=protected-access @@ -360,7 +349,7 @@ def _get_next_cb(self, continuation_token): maxresults=self.results_per_page, cls=return_context_and_deserialized, use_location=self.location_mode) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) def _extract_data_cb(self, get_next_return): @@ -420,7 +409,7 @@ def from_string(cls, permission): p_process = 'p' in permission parsed = cls(p_read, p_add, p_update, p_process) - + parsed._str = permission # pylint: disable = protected-access return parsed diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_queue_client.py b/azure/multiapi/storagev2/queue/v2019_07_07/_queue_client.py similarity index 73% rename from azure/multiapi/storagev2/queue/v2021_02_12/_queue_client.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_queue_client.py index efe9554..b652da1 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_queue_client.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_queue_client.py @@ -5,42 +5,41 @@ # -------------------------------------------------------------------------- import functools -import warnings from typing import ( # pylint: disable=unused-import - Any, Dict, List, Optional, + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, TYPE_CHECKING) -from urllib.parse import urlparse, quote, unquote +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore import six -from azure.core.exceptions import HttpResponseError + from azure.core.paging import ItemPaged from azure.core.tracing.decorator import distributed_trace - from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query from ._shared.request_handlers import add_metadata_headers, serialize_iso from ._shared.response_handlers import ( process_storage_error, return_response_headers, return_headers_and_deserialized) -from ._generated import AzureQueueStorage -from ._generated.models import SignedIdentifier, QueueMessage as GenQueueMessage -from ._deserialize import deserialize_queue_properties, deserialize_queue_creation -from ._encryption 
import StorageEncryptionMixin from ._message_encoding import NoEncodePolicy, NoDecodePolicy +from ._deserialize import deserialize_queue_properties, deserialize_queue_creation +from ._generated import AzureQueueStorage, VERSION +from ._generated.models import StorageErrorException, SignedIdentifier +from ._generated.models import QueueMessage as GenQueueMessage from ._models import QueueMessage, AccessPolicy, MessagesPaged -from ._serialize import get_api_version if TYPE_CHECKING: + from datetime import datetime + from azure.core.pipeline.policies import HTTPPolicy from ._models import QueueProperties -class QueueClient(StorageAccountHostsMixin, StorageEncryptionMixin): +class QueueClient(StorageAccountHostsMixin): """A client to interact with a specific Queue. - For more optional configuration, please click - `here `_. - :param str account_url: The URL to the storage account. In order to create a client given the full URI to the queue, use the :func:`from_queue_url` classmethod. @@ -48,22 +47,17 @@ class QueueClient(StorageAccountHostsMixin, StorageEncryptionMixin): :type queue_name: str :param credential: The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" - should be the storage account key. + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. :keyword str secondary_hostname: The hostname of the secondary endpoint. - :keyword message_encode_policy: The encoding policy to use on outgoing messages. + :keyword encode_policy: The encoding policy to use on outgoing messages. Default is not to encode messages. Other options include :class:`TextBase64EncodePolicy`, :class:`BinaryBase64EncodePolicy` or `None`. - :keyword message_decode_policy: The decoding policy to use on incoming messages. + :keyword decode_policy: The decoding policy to use on incoming messages. Default value is not to decode messages. Other options include :class:`TextBase64DecodePolicy`, :class:`BinaryBase64DecodePolicy` or `None`. @@ -79,7 +73,7 @@ class QueueClient(StorageAccountHostsMixin, StorageEncryptionMixin): def __init__( self, account_url, # type: str queue_name, # type: str - credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + credential=None, # type: Optional[Any] **kwargs # type: Any ): # type: (...) 
-> None @@ -104,9 +98,8 @@ def __init__( self._config.message_encode_policy = kwargs.get('message_encode_policy', None) or NoEncodePolicy() self._config.message_decode_policy = kwargs.get('message_decode_policy', None) or NoDecodePolicy() - self._client = AzureQueueStorage(self.url, base_url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._configure_encryption(kwargs) + self._client = AzureQueueStorage(self.url, pipeline=self._pipeline) + self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access def _format_url(self, hostname): """Format the endpoint URL according to the current location @@ -122,24 +115,15 @@ def _format_url(self, hostname): self._query_str) @classmethod - def from_queue_url(cls, - queue_url, # type: str - credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long - **kwargs # type: Any - ): - # type: (...) -> QueueClient + def from_queue_url(cls, queue_url, credential=None, **kwargs): + # type: (str, Optional[Any], Any) -> QueueClient """A client to interact with a specific Queue. :param str queue_url: The full URI to the queue, including SAS token if used. :param credential: The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" - should be the storage account key. + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. :returns: A queue client. :rtype: ~azure.storage.queue.QueueClient """ @@ -171,10 +155,10 @@ def from_queue_url(cls, def from_connection_string( cls, conn_str, # type: str queue_name, # type: str - credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + credential=None, # type: Any **kwargs # type: Any - ): - # type: (...) -> QueueClient + ): + # type: (...) -> None """Create QueueClient from a Connection String. :param str conn_str: @@ -184,12 +168,8 @@ def from_connection_string( :param credential: The credentials with which to authenticate. This is optional if the account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" - should be the storage account key. + access key values. 
The value can be a SAS token string, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. :returns: A queue client. :rtype: ~azure.storage.queue.QueueClient @@ -210,7 +190,7 @@ def from_connection_string( @distributed_trace def create_queue(self, **kwargs): - # type: (Any) -> None + # type: (Optional[Any]) -> None """Creates a new queue in the storage account. If a queue with the same name already exists, the operation fails with @@ -246,12 +226,12 @@ def create_queue(self, **kwargs): headers=headers, cls=deserialize_queue_creation, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace def delete_queue(self, **kwargs): - # type: (Any) -> None + # type: (Optional[Any]) -> None """Deletes the specified queue and any messages it contains. When a queue is successfully deleted, it is immediately marked for deletion @@ -278,12 +258,12 @@ def delete_queue(self, **kwargs): timeout = kwargs.pop('timeout', None) try: self._client.queue.delete(timeout=timeout, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace def get_queue_properties(self, **kwargs): - # type: (Any) -> QueueProperties + # type: (Optional[Any]) -> QueueProperties """Returns all user-defined metadata for the specified queue. The data returned does not include the queue's list of messages. @@ -308,17 +288,14 @@ def get_queue_properties(self, **kwargs): timeout=timeout, cls=deserialize_queue_properties, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) response.name = self.queue_name return response # type: ignore @distributed_trace - def set_queue_metadata(self, - metadata=None, # type: Optional[Dict[str, Any]] - **kwargs # type: Any - ): - # type: (...) -> None + def set_queue_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, Any]], Optional[Any]) -> None """Sets user-defined metadata on the specified queue. Metadata is associated with the queue as name-value pairs. @@ -348,12 +325,12 @@ def set_queue_metadata(self, headers=headers, cls=return_response_headers, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace def get_queue_access_policy(self, **kwargs): - # type: (Any) -> Dict[str, AccessPolicy] + # type: (Optional[Any]) -> Dict[str, Any] """Returns details about any stored access policies specified on the queue that may be used with Shared Access Signatures. @@ -368,16 +345,13 @@ def get_queue_access_policy(self, **kwargs): timeout=timeout, cls=return_headers_and_deserialized, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) return {s.id: s.access_policy or AccessPolicy() for s in identifiers} @distributed_trace - def set_queue_access_policy(self, - signed_identifiers, # type: Dict[str, AccessPolicy] - **kwargs # type: Any - ): - # type: (...) -> None + def set_queue_access_policy(self, signed_identifiers, **kwargs): + # type: (Dict[str, AccessPolicy], Optional[Any]) -> None """Sets stored access policies for the queue that may be used with Shared Access Signatures. 
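Since the docstring above is terse, here is a rough sketch of the stored-access-policy round trip; the ``AccessPolicy`` constructor arguments and the root-level import are assumed to mirror ``azure.storage.queue``, the policy name is arbitrary, and the permission string uses the ``r``/``a``/``u``/``p`` flags parsed by ``from_string`` earlier in this patch.

.. code-block:: python

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.queue.v2019_07_07 import AccessPolicy, QueueClient

    queue = QueueClient.from_connection_string("<connection-string>", queue_name="tasks")

    # One stored policy; it is serialized into the <SignedIdentifiers> body used by
    # the generated set_access_policy operation shown earlier in this patch.
    policy = AccessPolicy(permission="raup",
                          expiry=datetime.utcnow() + timedelta(hours=1))
    queue.set_queue_access_policy(signed_identifiers={"worker-policy": policy})

    print(queue.get_queue_access_policy())  # {'worker-policy': <AccessPolicy ...>}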
@@ -426,15 +400,14 @@ def set_queue_access_policy(self, queue_acl=signed_identifiers or None, timeout=timeout, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace - def send_message( - self, - content, # type: Any - **kwargs # type: Any - ): + def send_message( # type: ignore + self, content, # type: Any + **kwargs # type: Optional[Any] + ): # type: (...) -> QueueMessage """Adds a new message to the back of the message queue. @@ -484,25 +457,12 @@ def send_message( visibility_timeout = kwargs.pop('visibility_timeout', None) time_to_live = kwargs.pop('time_to_live', None) timeout = kwargs.pop('timeout', None) - try: - self._config.message_encode_policy.configure( - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - resolver=self.key_resolver_function, - encryption_version=self.encryption_version) - except TypeError: - warnings.warn( - "TypeError when calling message_encode_policy.configure. \ - It is likely missing the encryption_version parameter. \ - Consider updating your encryption information/implementation. \ - Retrying without encryption_version." - ) - self._config.message_encode_policy.configure( - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - resolver=self.key_resolver_function) - encoded_content = self._config.message_encode_policy(content) - new_message = GenQueueMessage(message_text=encoded_content) + self._config.message_encode_policy.configure( + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + resolver=self.key_resolver_function) + content = self._config.message_encode_policy(content) + new_message = GenQueueMessage(message_text=content) try: enqueued = self._client.messages.enqueue( @@ -511,84 +471,26 @@ def send_message( message_time_to_live=time_to_live, timeout=timeout, **kwargs) - queue_message = QueueMessage(content=content) + queue_message = QueueMessage(content=new_message.message_text) queue_message.id = enqueued[0].message_id queue_message.inserted_on = enqueued[0].insertion_time queue_message.expires_on = enqueued[0].expiration_time queue_message.pop_receipt = enqueued[0].pop_receipt queue_message.next_visible_on = enqueued[0].time_next_visible return queue_message - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace - def receive_message(self, **kwargs): - # type: (Any) -> QueueMessage - """Removes one message from the front of the queue. - - When the message is retrieved from the queue, the response includes the message - content and a pop_receipt value, which is required to delete the message. - The message is not automatically deleted from the queue, but after it has - been retrieved, it is not visible to other clients for the time interval - specified by the visibility_timeout parameter. - - If the key-encryption-key or resolver field is set on the local service object, the message will be - decrypted before being returned. - - :keyword int visibility_timeout: - If not specified, the default value is 0. Specifies the - new visibility timeout value, in seconds, relative to server time. - The value must be larger than or equal to 0, and cannot be - larger than 7 days. The visibility timeout of a message cannot be - set to a value later than the expiry time. visibility_timeout - should be set to a value smaller than the time-to-live value. - :keyword int timeout: - The server timeout, expressed in seconds. 
- :return: - Returns a message from the Queue. - :rtype: ~azure.storage.queue.QueueMessage - - .. admonition:: Example: - - .. literalinclude:: ../samples/queue_samples_message.py - :start-after: [START receive_one_message] - :end-before: [END receive_one_message] - :language: python - :dedent: 12 - :caption: Receive one message from the queue. - """ - visibility_timeout = kwargs.pop('visibility_timeout', None) - timeout = kwargs.pop('timeout', None) - self._config.message_decode_policy.configure( - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - resolver=self.key_resolver_function) - try: - message = self._client.messages.dequeue( - number_of_messages=1, - visibilitytimeout=visibility_timeout, - timeout=timeout, - cls=self._config.message_decode_policy, - **kwargs - ) - wrapped_message = QueueMessage._from_generated( # pylint: disable=protected-access - message[0]) if message != [] else None - return wrapped_message - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace def receive_messages(self, **kwargs): - # type: (Any) -> ItemPaged[QueueMessage] + # type: (Optional[Any]) -> ItemPaged[QueueMessage] """Removes one or more messages from the front of the queue. When a message is retrieved from the queue, the response includes the message content and a pop_receipt value, which is required to delete the message. The message is not automatically deleted from the queue, but after it has been retrieved, it is not visible to other clients for the time interval - specified by the visibility_timeout parameter. The iterator will continuously - fetch messages until the queue is empty or max_messages is reached (if max_messages - is set). + specified by the visibility_timeout parameter. If the key-encryption-key or resolver field is set on the local service object, the messages will be decrypted before being returned. @@ -598,28 +500,15 @@ def receive_messages(self, **kwargs): messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible messages are returned. By default, a single message is retrieved from the queue with this operation. - `by_page()` can be used to provide a page iterator on the AsyncItemPaged if messages_per_page is set. - `next()` can be used to get the next page. - .. admonition:: Example: - - .. literalinclude:: ../samples/queue_samples_message.py - :start-after: [START receive_messages_listing] - :end-before: [END receive_messages_listing] - :language: python - :dedent: 12 - :caption: List pages and corresponding messages from the queue. - :keyword int visibility_timeout: - If not specified, the default value is 30. Specifies the + If not specified, the default value is 0. Specifies the new visibility timeout value, in seconds, relative to server time. - The value must be larger than or equal to 1, and cannot be + The value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message cannot be set to a value later than the expiry time. visibility_timeout should be set to a value smaller than the time-to-live value. :keyword int timeout: The server timeout, expressed in seconds. - :keyword int max_messages: - An integer that specifies the maximum number of messages to retrieve from the queue. :return: Returns a message iterator of dict-like Message objects. 
:rtype: ~azure.core.paging.ItemPaged[~azure.storage.queue.QueueMessage] @@ -636,7 +525,6 @@ def receive_messages(self, **kwargs): messages_per_page = kwargs.pop('messages_per_page', None) visibility_timeout = kwargs.pop('visibility_timeout', None) timeout = kwargs.pop('timeout', None) - max_messages = kwargs.pop('max_messages', None) self._config.message_decode_policy.configure( require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, @@ -649,22 +537,13 @@ def receive_messages(self, **kwargs): cls=self._config.message_decode_policy, **kwargs ) - if max_messages is not None and messages_per_page is not None: - if max_messages < messages_per_page: - raise ValueError("max_messages must be greater or equal to messages_per_page") - return ItemPaged(command, results_per_page=messages_per_page, - page_iterator_class=MessagesPaged, max_messages=max_messages) - except HttpResponseError as error: + return ItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged) + except StorageErrorException as error: process_storage_error(error) @distributed_trace - def update_message(self, - message, # type: Any - pop_receipt=None, # type: Optional[str] - content=None, # type: Optional[Any] - **kwargs # type: Any - ): - # type: (...) -> QueueMessage + def update_message(self, message, pop_receipt=None, content=None, **kwargs): + # type: (Any, Optional[str], Optional[Any], Any) -> QueueMessage """Updates the visibility timeout of a message. You can also use this operation to update the contents of a message. @@ -731,25 +610,12 @@ def update_message(self, if receipt is None: raise ValueError("pop_receipt must be present") if message_text is not None: - try: - self._config.message_encode_policy.configure( - self.require_encryption, - self.key_encryption_key, - self.key_resolver_function, - encryption_version=self.encryption_version) - except TypeError: - warnings.warn( - "TypeError when calling message_encode_policy.configure. \ - It is likely missing the encryption_version parameter. \ - Consider updating your encryption information/implementation. \ - Retrying without encryption_version." - ) - self._config.message_encode_policy.configure( - self.require_encryption, - self.key_encryption_key, - self.key_resolver_function) - encoded_message_text = self._config.message_encode_policy(message_text) - updated = GenQueueMessage(message_text=encoded_message_text) + self._config.message_encode_policy.configure( + self.require_encryption, + self.key_encryption_key, + self.key_resolver_function) + message_text = self._config.message_encode_policy(message_text) + updated = GenQueueMessage(message_text=message_text) else: updated = None # type: ignore try: @@ -769,15 +635,12 @@ def update_message(self, new_message.pop_receipt = response['popreceipt'] new_message.next_visible_on = response['time_next_visible'] return new_message - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace - def peek_messages(self, - max_messages=None, # type: Optional[int] - **kwargs # type: Any - ): - # type: (...) -> List[QueueMessage] + def peek_messages(self, max_messages=None, **kwargs): + # type: (Optional[int], Optional[Any]) -> List[QueueMessage] """Retrieves one or more messages from the front of the queue, but does not alter the visibility of the message. 
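Taken together, the message operations in this file (``send_message``, ``receive_messages``, ``update_message``, ``peek_messages``, ``delete_message``, ``clear_messages``) cover the usual produce/consume loop. A short sketch with placeholder names; note that in this v2019_07_07 client ``receive_messages`` has no ``max_messages`` keyword and simply stops iterating when the queue is empty.

.. code-block:: python

    from azure.multiapi.storagev2.queue.v2019_07_07 import QueueClient

    queue = QueueClient.from_connection_string("<connection-string>", queue_name="tasks")
    queue.send_message("process-order-42", time_to_live=3600)

    # receive_messages returns an ItemPaged of QueueMessage objects.
    for msg in queue.receive_messages(messages_per_page=5, visibility_timeout=60):
        print(msg.id, msg.content)
        # Keep the message invisible a little longer while it is processed...
        updated = queue.update_message(msg, visibility_timeout=30)
        # ...then remove it for good, using the fresh pop receipt returned by update.
        queue.delete_message(updated)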
@@ -830,12 +693,12 @@ def peek_messages(self, for peeked in messages: wrapped_messages.append(QueueMessage._from_generated(peeked)) # pylint: disable=protected-access return wrapped_messages - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace def clear_messages(self, **kwargs): - # type: (Any) -> None + # type: (Optional[Any]) -> None """Deletes all messages from the specified queue. :keyword int timeout: @@ -853,16 +716,12 @@ def clear_messages(self, **kwargs): timeout = kwargs.pop('timeout', None) try: self._client.messages.clear(timeout=timeout, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace - def delete_message(self, - message, # type: Any - pop_receipt=None, # type: Optional[str] - **kwargs # type: Any - ): - # type: (...) -> None + def delete_message(self, message, pop_receipt=None, **kwargs): + # type: (Any, Optional[str], Any) -> None """Deletes the specified message. Normally after a client retrieves a message with the receive messages operation, @@ -910,5 +769,5 @@ def delete_message(self, queue_message_id=message_id, **kwargs ) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_queue_service_client.py b/azure/multiapi/storagev2/queue/v2019_07_07/_queue_service_client.py similarity index 83% rename from azure/multiapi/storagev2/queue/v2021_02_12/_queue_service_client.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_queue_service_client.py index a0f090d..44fc4bd 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_queue_service_client.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_queue_service_client.py @@ -6,39 +6,43 @@ import functools from typing import ( # pylint: disable=unused-import - Any, Dict, List, Optional, Union, + Union, Optional, Any, Iterable, Dict, List, TYPE_CHECKING) -from urllib.parse import urlparse +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # type: ignore -from azure.core.exceptions import HttpResponseError from azure.core.paging import ItemPaged from azure.core.pipeline import Pipeline from azure.core.tracing.decorator import distributed_trace - -from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query from ._shared.models import LocationMode +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query from ._shared.response_handlers import process_storage_error -from ._generated import AzureQueueStorage -from ._generated.models import StorageServiceProperties -from ._encryption import StorageEncryptionMixin +from ._generated import AzureQueueStorage, VERSION +from ._generated.models import StorageServiceProperties, StorageErrorException + from ._models import ( QueuePropertiesPaged, service_stats_deserialize, service_properties_deserialize, ) -from ._serialize import get_api_version + from ._queue_client import QueueClient if TYPE_CHECKING: + from datetime import datetime + from azure.core.configuration import Configuration + from azure.core.pipeline.policies import HTTPPolicy from ._models import ( - CorsRule, - Metrics, QueueProperties, QueueAnalyticsLogging, + Metrics, + CorsRule, ) -class QueueServiceClient(StorageAccountHostsMixin, StorageEncryptionMixin): +class QueueServiceClient(StorageAccountHostsMixin): 
"""A client to interact with the Queue Service at the account level. This client provides operations to retrieve and configure the account properties @@ -46,26 +50,17 @@ class QueueServiceClient(StorageAccountHostsMixin, StorageEncryptionMixin): For operations relating to a specific queue, a client for this entity can be retrieved using the :func:`~get_queue_client` function. - For more optional configuration, please click - `here `_. - :param str account_url: The URL to the queue service endpoint. Any other entities included in the URL path (e.g. queue) will be discarded. This URL can be optionally authenticated with a SAS token. :param credential: The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" - should be the storage account key. + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. :keyword str secondary_hostname: The hostname of the secondary endpoint. @@ -88,7 +83,7 @@ class QueueServiceClient(StorageAccountHostsMixin, StorageEncryptionMixin): def __init__( self, account_url, # type: str - credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + credential=None, # type: Optional[Any] **kwargs # type: Any ): # type: (...) -> None @@ -106,9 +101,8 @@ def __init__( raise ValueError("You need to provide either a SAS token or an account shared key to authenticate.") self._query_str, credential = self._format_query_string(sas_token, credential) super(QueueServiceClient, self).__init__(parsed_url, service='queue', credential=credential, **kwargs) - self._client = AzureQueueStorage(self.url, base_url=self.url, pipeline=self._pipeline) - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access - self._configure_encryption(kwargs) + self._client = AzureQueueStorage(self.url, pipeline=self._pipeline) + self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access def _format_url(self, hostname): """Format the endpoint URL according to the current location @@ -119,7 +113,7 @@ def _format_url(self, hostname): @classmethod def from_connection_string( cls, conn_str, # type: str - credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + credential=None, # type: Optional[Any] **kwargs # type: Any ): # type: (...) 
-> QueueServiceClient """Create QueueServiceClient from a Connection String. @@ -129,12 +123,8 @@ def from_connection_string( :param credential: The credentials with which to authenticate. This is optional if the account URL already has a SAS token, or the connection string already has shared - access key values. The value can be a SAS token string, - an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - Credentials provided here will take precedence over those in the connection string. - If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" - should be the storage account key. + access key values. The value can be a SAS token string, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. :returns: A Queue service client. :rtype: ~azure.storage.queue.QueueClient @@ -155,7 +145,7 @@ def from_connection_string( @distributed_trace def get_service_stats(self, **kwargs): - # type: (Any) -> Dict[str, Any] + # type: (Optional[Any]) -> Dict[str, Any] """Retrieves statistics related to replication for the Queue service. It is only available when read-access geo-redundant replication is enabled for @@ -184,12 +174,12 @@ def get_service_stats(self, **kwargs): stats = self._client.service.get_statistics( # type: ignore timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) return service_stats_deserialize(stats) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace def get_service_properties(self, **kwargs): - # type: (Any) -> Dict[str, Any] + # type: (Optional[Any]) -> Dict[str, Any] """Gets the properties of a storage account's Queue service, including Azure Storage Analytics. @@ -212,7 +202,7 @@ def get_service_properties(self, **kwargs): try: service_props = self._client.service.get_properties(timeout=timeout, **kwargs) # type: ignore return service_properties_deserialize(service_props) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace @@ -221,7 +211,7 @@ def set_service_properties( # type: ignore hour_metrics=None, # type: Optional[Metrics] minute_metrics=None, # type: Optional[Metrics] cors=None, # type: Optional[List[CorsRule]] - **kwargs # type: Any + **kwargs ): # type: (...) -> None """Sets the properties of a storage account's Queue service, including @@ -268,14 +258,14 @@ def set_service_properties( # type: ignore ) try: return self._client.service.set_properties(props, timeout=timeout, **kwargs) # type: ignore - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace def list_queues( self, name_starts_with=None, # type: Optional[str] include_metadata=False, # type: Optional[bool] - **kwargs # type: Any + **kwargs ): # type: (...) -> ItemPaged[QueueProperties] """Returns a generator to list the queues under the specified account. @@ -325,7 +315,7 @@ def list_queues( def create_queue( self, name, # type: str metadata=None, # type: Optional[Dict[str, str]] - **kwargs # type: Any + **kwargs ): # type: (...) -> QueueClient """Creates a new queue under the specified account. 
@@ -360,9 +350,8 @@ def create_queue( @distributed_trace def delete_queue( - self, - queue, # type: Union[QueueProperties, str] - **kwargs # type: Any + self, queue, # type: Union[QueueProperties, str] + **kwargs ): # type: (...) -> None """Deletes the specified queue and any messages it contains. @@ -397,11 +386,8 @@ def delete_queue( kwargs.setdefault('merge_span', True) queue_client.delete_queue(timeout=timeout, **kwargs) - def get_queue_client(self, - queue, # type: Union[QueueProperties, str] - **kwargs # type: Any - ): - # type: (...) -> QueueClient + def get_queue_client(self, queue, **kwargs): + # type: (Union[QueueProperties, str], Optional[Any]) -> QueueClient """Get a client to interact with the specified queue. The queue need not already exist. @@ -435,6 +421,5 @@ def get_queue_client(self, return QueueClient( self.url, queue_name=queue_name, credential=self.credential, key_resolver_function=self.key_resolver_function, require_encryption=self.require_encryption, - encryption_version=self.encryption_version, key_encryption_key=self.key_encryption_key, - api_version=self.api_version, _pipeline=_pipeline, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, **kwargs) + key_encryption_key=self.key_encryption_key, api_version=self.api_version, _pipeline=_pipeline, + _configuration=self._config, _location_mode=self._location_mode, _hosts=self._hosts, **kwargs) diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/__init__.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/__init__.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_12_12/_shared/__init__.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/__init__.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/authentication.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/authentication.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/authentication.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/authentication.py diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/base_client.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client.py similarity index 96% rename from azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/base_client.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client.py index 982dcf3..30a89cf 100644 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/base_client.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client.py @@ -62,6 +62,7 @@ "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, + "dfs": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, } @@ -78,7 +79,7 @@ def __init__( self._hosts = kwargs.get("_hosts") self.scheme = parsed_url.scheme - if service not in ["blob", "queue", "file-share"]: + if service not in ["blob", "queue", "file-share", "dfs"]: raise ValueError("Invalid service: {}".format(service)) service_name = service.split('-')[0] account = parsed_url.netloc.split(".{}.core.".format(service_name)) @@ -188,6 +189,14 @@ def location_mode(self, value): else: raise ValueError("No host URL for location mode: {}".format(value)) + @property + def api_version(self): + """The version of the Storage API used for requests. 
+ + :type: str + """ + return self._client._config.version # pylint: disable=protected-access + def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): query_str = "?" if snapshot: @@ -227,7 +236,7 @@ def _create_pipeline(self, credential, **kwargs): StorageContentValidation(), StorageRequestHook(**kwargs), self._credential_policy, - ContentDecodePolicy(), + ContentDecodePolicy(response_encoding="utf-8"), RedirectPolicy(**kwargs), StorageHosts(hosts=self._hosts, **kwargs), config.retry_policy, @@ -249,7 +258,7 @@ def _batch_send( request = self._client._client.post( # pylint: disable=protected-access url='https://{}/?comp=batch'.format(self.primary_hostname), headers={ - 'x-ms-version': self._client._config.version # pylint: disable=protected-access + 'x-ms-version': self.api_version } ) @@ -324,7 +333,7 @@ def format_shared_key_credential(account, credential): def parse_connection_str(conn_str, credential, service): conn_str = conn_str.rstrip(";") conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): + if any(len(tup) != 2 for tup in conn_settings): raise ValueError("Connection string is either blank or malformed.") conn_settings = dict(conn_settings) endpoints = _SERVICE_PARAMS[service] diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/base_client_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client_async.py similarity index 97% rename from azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/base_client_async.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client_async.py index c47cae9..3c806d7 100644 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/base_client_async.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/base_client_async.py @@ -93,7 +93,7 @@ def _create_pipeline(self, credential, **kwargs): StorageContentValidation(), StorageRequestHook(**kwargs), self._credential_policy, - ContentDecodePolicy(), + ContentDecodePolicy(response_encoding="utf-8"), AsyncRedirectPolicy(**kwargs), StorageHosts(hosts=self._hosts, **kwargs), # type: ignore config.retry_policy, @@ -115,7 +115,7 @@ async def _batch_send( request = self._client._client.post( # pylint: disable=protected-access url='https://{}/?comp=batch'.format(self.primary_hostname), headers={ - 'x-ms-version': self._client._config.version # pylint: disable=protected-access + 'x-ms-version': self.api_version } ) diff --git a/azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/constants.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/constants.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2019_12_12/_shared/constants.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/constants.py diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/encryption.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/encryption.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_12_12/_shared/encryption.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/encryption.py diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/models.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/models.py similarity index 99% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/models.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/models.py index 6a8cfaf..87f2f00 100644 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/models.py +++ 
b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/models.py @@ -259,7 +259,7 @@ def from_string(cls, string): :param str string: Specify service, container, or object in in the string with the first letter of the word. :return: A ResourceTypes object - :rtype: ~azure.storage.blob.ResourceTypes + :rtype: ~azure.storage.queue.ResourceTypes """ res_service = 's' in string res_container = 'c' in string @@ -333,7 +333,7 @@ def from_string(cls, permission): :param str permission: Specify permissions in the string with the first letter of the word. :return: A AccountSasPermissions object - :rtype: ~azure.storage.blob.AccountSasPermissions + :rtype: ~azure.storage.queue.AccountSasPermissions """ p_read = 'r' in permission p_write = 'w' in permission @@ -381,7 +381,7 @@ def from_string(cls, string): :param str string: Specify blob, queue, or file in in the string with the first letter of the word. :return: A Services object - :rtype: ~azure.storage.blob.Services + :rtype: ~azure.storage.queue.Services """ res_blob = 'b' in string res_queue = 'q' in string diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/parser.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/parser.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_12_12/_shared/parser.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/parser.py diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/policies.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies.py similarity index 100% rename from azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/policies.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies.py diff --git a/azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/policies_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies_async.py similarity index 100% rename from azure/multiapi/storagev2/filedatalake/v2018_11_09/_shared/policies_async.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/policies_async.py diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/request_handlers.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/request_handlers.py similarity index 99% rename from azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/request_handlers.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/request_handlers.py index fd1ff19..2ce74d4 100644 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/request_handlers.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/request_handlers.py @@ -15,7 +15,6 @@ import isodate -from azure.core.configuration import Configuration from azure.core.exceptions import raise_with_traceback diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/response_handlers.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/response_handlers.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_12_12/_shared/response_handlers.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/response_handlers.py diff --git a/azure/multiapi/storagev2/blob/v2019_12_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/shared_access_signature.py similarity index 100% rename from azure/multiapi/storagev2/blob/v2019_12_12/_shared/shared_access_signature.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/shared_access_signature.py diff --git a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/uploads.py 
b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads.py similarity index 98% rename from azure/multiapi/storagev2/blob/v2019_02_02/_shared/uploads.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads.py index 032915c..13b814e 100644 --- a/azure/multiapi/storagev2/blob/v2019_02_02/_shared/uploads.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads.py @@ -280,10 +280,7 @@ class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method def _is_chunk_empty(self, chunk_data): # read until non-zero byte is encountered # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b"\x00"]: - return False - return True + return not any(bytearray(chunk_data)) def _upload_chunk(self, chunk_offset, chunk_data): # avoid uploading the empty pages @@ -339,11 +336,12 @@ def _upload_chunk(self, chunk_offset, chunk_data): class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method def _upload_chunk(self, chunk_offset, chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 response = self.service.upload_range( chunk_data, chunk_offset, - chunk_end, + length, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options diff --git a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/uploads_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads_async.py similarity index 99% rename from azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/uploads_async.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads_async.py index 0b46112..92fcab5 100644 --- a/azure/multiapi/storagev2/fileshare/v2019_02_02/_shared/uploads_async.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_shared/uploads_async.py @@ -337,12 +337,11 @@ async def _upload_chunk(self, chunk_offset, chunk_data): class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 + chunk_end = chunk_offset + len(chunk_data) - 1 response = await self.service.upload_range( chunk_data, chunk_offset, - length, + chunk_end, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared_access_signature.py b/azure/multiapi/storagev2/queue/v2019_07_07/_shared_access_signature.py similarity index 97% rename from azure/multiapi/storagev2/queue/v2021_02_12/_shared_access_signature.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_shared_access_signature.py index e531e3d..3959c9b 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared_access_signature.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_shared_access_signature.py @@ -8,15 +8,15 @@ Union, Optional, Any, TYPE_CHECKING ) -from ._shared import sign_string -from ._shared.constants import X_MS_VERSION -from ._shared.models import Services -from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ +from azure.storage.queue._shared import sign_string +from azure.storage.queue._shared.constants import X_MS_VERSION +from azure.storage.queue._shared.models import Services +from azure.storage.queue._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ QueryStringConstants if TYPE_CHECKING: from datetime import datetime - 
from .import ( + from azure.storage.queue import ( ResourceTypes, AccountSasPermissions, QueueSasPermissions diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_version.py b/azure/multiapi/storagev2/queue/v2019_07_07/_version.py similarity index 95% rename from azure/multiapi/storagev2/queue/v2021_02_12/_version.py rename to azure/multiapi/storagev2/queue/v2019_07_07/_version.py index 32d8db6..8c6cb2e 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_version.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/_version.py @@ -9,4 +9,4 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "12.5.0b1" +VERSION = "12.1.1" diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/aio/__init__.py b/azure/multiapi/storagev2/queue/v2019_07_07/aio/__init__.py similarity index 100% rename from azure/multiapi/storagev2/queue/v2021_02_12/aio/__init__.py rename to azure/multiapi/storagev2/queue/v2019_07_07/aio/__init__.py diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/aio/_models.py b/azure/multiapi/storagev2/queue/v2019_07_07/aio/_models.py similarity index 84% rename from azure/multiapi/storagev2/queue/v2021_02_12/aio/_models.py rename to azure/multiapi/storagev2/queue/v2019_07_07/aio/_models.py index dcb8d32..4e4577d 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/aio/_models.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/aio/_models.py @@ -8,10 +8,10 @@ from typing import List # pylint: disable=unused-import from azure.core.async_paging import AsyncPageIterator -from azure.core.exceptions import HttpResponseError from .._shared.response_handlers import ( process_storage_error, return_context_and_deserialized) +from .._generated.models import StorageErrorException from .._models import QueueMessage, QueueProperties @@ -21,10 +21,8 @@ class MessagesPaged(AsyncPageIterator): :param callable command: Function to retrieve the next page of items. :param int results_per_page: The maximum number of messages to retrieve per call. - :param int max_messages: The maximum number of messages to retrieve from - the queue. 
""" - def __init__(self, command, results_per_page=None, continuation_token=None, max_messages=None): + def __init__(self, command, results_per_page=None, continuation_token=None): if continuation_token is not None: raise ValueError("This operation does not support continuation token") @@ -34,26 +32,17 @@ def __init__(self, command, results_per_page=None, continuation_token=None, max_ ) self._command = command self.results_per_page = results_per_page - self._max_messages = max_messages async def _get_next_cb(self, continuation_token): try: - if self._max_messages is not None: - if self.results_per_page is None: - self.results_per_page = 1 - if self._max_messages < 1: - raise StopAsyncIteration("End of paging") - self.results_per_page = min(self.results_per_page, self._max_messages) return await self._command(number_of_messages=self.results_per_page) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) async def _extract_data_cb(self, messages): # There is no concept of continuation token, so raising on my own condition if not messages: raise StopAsyncIteration("End of paging") - if self._max_messages is not None: - self._max_messages = self._max_messages - len(messages) return "TOKEN_IGNORED", [QueueMessage._from_generated(q) for q in messages] # pylint: disable=protected-access @@ -94,7 +83,7 @@ async def _get_next_cb(self, continuation_token): maxresults=self.results_per_page, cls=return_context_and_deserialized, use_location=self.location_mode) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) async def _extract_data_cb(self, get_next_return): diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/aio/_queue_client_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_client_async.py similarity index 78% rename from azure/multiapi/storagev2/queue/v2021_02_12/aio/_queue_client_async.py rename to azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_client_async.py index eb0c010..3325d1c 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/aio/_queue_client_async.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_client_async.py @@ -3,41 +3,58 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method import functools -import warnings from typing import ( # pylint: disable=unused-import - Any, Dict, List, Optional, - TYPE_CHECKING) + Union, + Optional, + Any, + IO, + Iterable, + AnyStr, + Dict, + List, + Tuple, + TYPE_CHECKING, +) + +try: + from urllib.parse import urlparse, quote, unquote # pylint: disable=unused-import +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore -from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async -from .._serialize import get_api_version +from azure.core.async_paging import AsyncItemPaged + from .._shared.base_client_async import AsyncStorageAccountHostsMixin -from .._shared.policies_async import ExponentialRetry from .._shared.request_handlers import add_metadata_headers, serialize_iso from .._shared.response_handlers import ( return_response_headers, process_storage_error, return_headers_and_deserialized, ) -from .._generated.aio import AzureQueueStorage -from .._generated.models import SignedIdentifier, QueueMessage as GenQueueMessage from .._deserialize import deserialize_queue_properties, deserialize_queue_creation -from .._encryption import StorageEncryptionMixin +from .._generated.version import VERSION +from .._generated.aio import AzureQueueStorage +from .._generated.models import StorageErrorException, SignedIdentifier +from .._generated.models import QueueMessage as GenQueueMessage + from .._models import QueueMessage, AccessPolicy -from .._queue_client import QueueClient as QueueClientBase from ._models import MessagesPaged +from .._shared.policies_async import ExponentialRetry +from .._queue_client import QueueClient as QueueClientBase + if TYPE_CHECKING: - from .._models import QueueProperties + from datetime import datetime + from azure.core.pipeline.policies import HTTPPolicy + from .._models import QueueSasPermissions, QueueProperties -class QueueClient(AsyncStorageAccountHostsMixin, QueueClientBase, StorageEncryptionMixin): +class QueueClient(AsyncStorageAccountHostsMixin, QueueClientBase): """A client to interact with a specific Queue. :param str account_url: @@ -47,22 +64,17 @@ class QueueClient(AsyncStorageAccountHostsMixin, QueueClientBase, StorageEncrypt :type queue_name: str :param credential: The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. The value can be a SAS token string, - an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" - should be the storage account key. + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. :keyword str api_version: - The Storage API version to use for requests. 
Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. :keyword str secondary_hostname: The hostname of the secondary endpoint. - :keyword message_encode_policy: The encoding policy to use on outgoing messages. + :keyword encode_policy: The encoding policy to use on outgoing messages. Default is not to encode messages. Other options include :class:`TextBase64EncodePolicy`, :class:`BinaryBase64EncodePolicy` or `None`. - :keyword message_decode_policy: The decoding policy to use on incoming messages. + :keyword decode_policy: The decoding policy to use on incoming messages. Default value is not to decode messages. Other options include :class:`TextBase64DecodePolicy`, :class:`BinaryBase64DecodePolicy` or `None`. @@ -87,7 +99,7 @@ def __init__( self, account_url, # type: str queue_name, # type: str - credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + credential=None, # type: Optional[Any] **kwargs # type: Any ): # type: (...) -> None @@ -96,11 +108,9 @@ def __init__( super(QueueClient, self).__init__( account_url, queue_name=queue_name, credential=credential, loop=loop, **kwargs ) - self._client = AzureQueueStorage(self.url, base_url=self.url, - pipeline=self._pipeline, loop=loop) # type: ignore - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._client = AzureQueueStorage(self.url, pipeline=self._pipeline, loop=loop) # type: ignore + self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access self._loop = loop - self._configure_encryption(kwargs) @distributed_trace_async async def create_queue(self, **kwargs): @@ -137,7 +147,7 @@ async def create_queue(self, **kwargs): return await self._client.queue.create( # type: ignore metadata=metadata, timeout=timeout, headers=headers, cls=deserialize_queue_creation, **kwargs ) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -169,7 +179,7 @@ async def delete_queue(self, **kwargs): timeout = kwargs.pop('timeout', None) try: await self._client.queue.delete(timeout=timeout, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -198,7 +208,7 @@ async def get_queue_properties(self, **kwargs): response = await self._client.queue.get_properties( timeout=timeout, cls=deserialize_queue_properties, **kwargs ) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) response.name = self.queue_name return response # type: ignore @@ -233,7 +243,7 @@ async def set_queue_metadata(self, metadata=None, **kwargs): return await self._client.queue.set_metadata( # type: ignore timeout=timeout, headers=headers, cls=return_response_headers, **kwargs ) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -252,7 +262,7 @@ async def get_queue_access_policy(self, **kwargs): _, identifiers = await self._client.queue.get_access_policy( timeout=timeout, cls=return_headers_and_deserialized, **kwargs ) - except 
HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) return {s.id: s.access_policy or AccessPolicy() for s in identifiers} @@ -305,7 +315,7 @@ async def set_queue_access_policy(self, signed_identifiers, **kwargs): signed_identifiers = identifiers # type: ignore try: await self._client.queue.set_access_policy(queue_acl=signed_identifiers or None, timeout=timeout, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -363,25 +373,13 @@ async def send_message( # type: ignore visibility_timeout = kwargs.pop('visibility_timeout', None) time_to_live = kwargs.pop('time_to_live', None) timeout = kwargs.pop('timeout', None) - try: - self._config.message_encode_policy.configure( - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - resolver=self.key_resolver_function, - encryption_version=self.encryption_version) - except TypeError: - warnings.warn( - "TypeError when calling message_encode_policy.configure. \ - It is likely missing the encryption_version parameter. \ - Consider updating your encryption information/implementation. \ - Retrying without encryption_version." - ) - self._config.message_encode_policy.configure( - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - resolver=self.key_resolver_function) - encoded_content = self._config.message_encode_policy(content) - new_message = GenQueueMessage(message_text=encoded_content) + self._config.message_encode_policy.configure( + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + resolver=self.key_resolver_function + ) + content = self._config.message_encode_policy(content) + new_message = GenQueueMessage(message_text=content) try: enqueued = await self._client.messages.enqueue( @@ -391,70 +389,14 @@ async def send_message( # type: ignore timeout=timeout, **kwargs ) - queue_message = QueueMessage(content=content) + queue_message = QueueMessage(content=new_message.message_text) queue_message.id = enqueued[0].message_id queue_message.inserted_on = enqueued[0].insertion_time queue_message.expires_on = enqueued[0].expiration_time queue_message.pop_receipt = enqueued[0].pop_receipt queue_message.next_visible_on = enqueued[0].time_next_visible return queue_message - except HttpResponseError as error: - process_storage_error(error) - - @distributed_trace_async - async def receive_message(self, **kwargs): - # type: (Optional[Any]) -> QueueMessage - """Removes one message from the front of the queue. - - When the message is retrieved from the queue, the response includes the message - content and a pop_receipt value, which is required to delete the message. - The message is not automatically deleted from the queue, but after it has - been retrieved, it is not visible to other clients for the time interval - specified by the visibility_timeout parameter. - - If the key-encryption-key or resolver field is set on the local service object, the message will be - decrypted before being returned. - - :keyword int visibility_timeout: - If not specified, the default value is 0. Specifies the - new visibility timeout value, in seconds, relative to server time. - The value must be larger than or equal to 0, and cannot be - larger than 7 days. The visibility timeout of a message cannot be - set to a value later than the expiry time. visibility_timeout - should be set to a value smaller than the time-to-live value. 
- :keyword int timeout: - The server timeout, expressed in seconds. - :return: - Returns a message from the Queue. - :rtype: ~azure.storage.queue.QueueMessage - - .. admonition:: Example: - - .. literalinclude:: ../samples/queue_samples_message_async.py - :start-after: [START receive_one_message] - :end-before: [END receive_one_message] - :language: python - :dedent: 12 - :caption: Receive one message from the queue. - """ - visibility_timeout = kwargs.pop('visibility_timeout', None) - timeout = kwargs.pop('timeout', None) - self._config.message_decode_policy.configure( - require_encryption=self.require_encryption, - key_encryption_key=self.key_encryption_key, - resolver=self.key_resolver_function) - try: - message = await self._client.messages.dequeue( - number_of_messages=1, - visibilitytimeout=visibility_timeout, - timeout=timeout, - cls=self._config.message_decode_policy, - **kwargs - ) - wrapped_message = QueueMessage._from_generated( # pylint: disable=protected-access - message[0]) if message != [] else None - return wrapped_message - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace @@ -466,9 +408,7 @@ def receive_messages(self, **kwargs): content and a pop_receipt value, which is required to delete the message. The message is not automatically deleted from the queue, but after it has been retrieved, it is not visible to other clients for the time interval - specified by the visibility_timeout parameter. The iterator will continuously - fetch messages until the queue is empty or max_messages is reached (if max_messages - is set). + specified by the visibility_timeout parameter. If the key-encryption-key or resolver field is set on the local service object, the messages will be decrypted before being returned. @@ -478,19 +418,15 @@ def receive_messages(self, **kwargs): messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible messages are returned. By default, a single message is retrieved from the queue with this operation. - `by_page()` can be used to provide a page iterator on the AsyncItemPaged if messages_per_page is set. - `next()` can be used to get the next page. :keyword int visibility_timeout: - If not specified, the default value is 30. Specifies the + If not specified, the default value is 0. Specifies the new visibility timeout value, in seconds, relative to server time. - The value must be larger than or equal to 1, and cannot be + The value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message cannot be set to a value later than the expiry time. visibility_timeout should be set to a value smaller than the time-to-live value. :keyword int timeout: The server timeout, expressed in seconds. - :keyword int max_messages: - An integer that specifies the maximum number of messages to retrieve from the queue. :return: Returns a message iterator of dict-like Message objects. 
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.queue.QueueMessage] @@ -507,7 +443,6 @@ def receive_messages(self, **kwargs): messages_per_page = kwargs.pop('messages_per_page', None) visibility_timeout = kwargs.pop('visibility_timeout', None) timeout = kwargs.pop('timeout', None) - max_messages = kwargs.pop('max_messages', None) self._config.message_decode_policy.configure( require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, @@ -521,12 +456,8 @@ def receive_messages(self, **kwargs): cls=self._config.message_decode_policy, **kwargs ) - if max_messages is not None and messages_per_page is not None: - if max_messages < messages_per_page: - raise ValueError("max_messages must be greater or equal to messages_per_page") - return AsyncItemPaged(command, results_per_page=messages_per_page, - page_iterator_class=MessagesPaged, max_messages=max_messages) - except HttpResponseError as error: + return AsyncItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged) + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -604,27 +535,11 @@ async def update_message( if receipt is None: raise ValueError("pop_receipt must be present") if message_text is not None: - try: - self._config.message_encode_policy.configure( - self.require_encryption, - self.key_encryption_key, - self.key_resolver_function, - encryption_version=self.encryption_version - ) - except TypeError: - warnings.warn( - "TypeError when calling message_encode_policy.configure. \ - It is likely missing the encryption_version parameter. \ - Consider updating your encryption information/implementation. \ - Retrying without encryption_version." - ) - self._config.message_encode_policy.configure( - self.require_encryption, - self.key_encryption_key, - self.key_resolver_function - ) - encoded_message_text = self._config.message_encode_policy(message_text) - updated = GenQueueMessage(message_text=encoded_message_text) + self._config.message_encode_policy.configure( + self.require_encryption, self.key_encryption_key, self.key_resolver_function + ) + message_text = self._config.message_encode_policy(message_text) + updated = GenQueueMessage(message_text=message_text) else: updated = None # type: ignore try: @@ -645,7 +560,7 @@ async def update_message( new_message.pop_receipt = response["popreceipt"] new_message.next_visible_on = response["time_next_visible"] return new_message - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -702,7 +617,7 @@ async def peek_messages(self, max_messages=None, **kwargs): for peeked in messages: wrapped_messages.append(QueueMessage._from_generated(peeked)) # pylint: disable=protected-access return wrapped_messages - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -725,7 +640,7 @@ async def clear_messages(self, **kwargs): timeout = kwargs.pop('timeout', None) try: await self._client.messages.clear(timeout=timeout, **kwargs) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -775,5 +690,5 @@ async def delete_message(self, message, pop_receipt=None, **kwargs): await self._client.message_id.delete( pop_receipt=receipt, timeout=timeout, queue_message_id=message_id, **kwargs ) - except HttpResponseError as error: + except StorageErrorException as error: 
process_storage_error(error) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/aio/_queue_service_client_async.py b/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_service_client_async.py similarity index 89% rename from azure/multiapi/storagev2/queue/v2021_02_12/aio/_queue_service_client_async.py rename to azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_service_client_async.py index 368cee2..4d32d1f 100644 --- a/azure/multiapi/storagev2/queue/v2021_02_12/aio/_queue_service_client_async.py +++ b/azure/multiapi/storagev2/queue/v2019_07_07/aio/_queue_service_client_async.py @@ -3,45 +3,50 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method import functools from typing import ( # pylint: disable=unused-import - Any, Dict, List, Optional, Union, + Union, Optional, Any, Iterable, Dict, List, TYPE_CHECKING) +try: + from urllib.parse import urlparse # pylint: disable=unused-import +except ImportError: + from urlparse import urlparse # type: ignore from azure.core.async_paging import AsyncItemPaged -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import AsyncPipeline from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import AsyncPipeline from azure.core.tracing.decorator_async import distributed_trace_async -from .._serialize import get_api_version -from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper from .._shared.policies_async import ExponentialRetry +from .._queue_service_client import QueueServiceClient as QueueServiceClientBase from .._shared.models import LocationMode +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper from .._shared.response_handlers import process_storage_error +from .._generated.version import VERSION from .._generated.aio import AzureQueueStorage -from .._generated.models import StorageServiceProperties -from .._encryption import StorageEncryptionMixin +from .._generated.models import StorageServiceProperties, StorageErrorException + +from ._models import QueuePropertiesPaged +from ._queue_client_async import QueueClient from .._models import ( service_stats_deserialize, service_properties_deserialize, ) -from .._queue_service_client import QueueServiceClient as QueueServiceClientBase -from ._models import QueuePropertiesPaged -from ._queue_client_async import QueueClient if TYPE_CHECKING: + from datetime import datetime + from azure.core.configuration import Configuration + from azure.core.pipeline.policies import HTTPPolicy from .._models import ( - CorsRule, - Metrics, QueueProperties, QueueAnalyticsLogging, + Metrics, + CorsRule, ) -class QueueServiceClient(AsyncStorageAccountHostsMixin, QueueServiceClientBase, StorageEncryptionMixin): +class QueueServiceClient(AsyncStorageAccountHostsMixin, QueueServiceClientBase): """A client to interact with the Queue Service at the account level. This client provides operations to retrieve and configure the account properties @@ -55,16 +60,11 @@ class QueueServiceClient(AsyncStorageAccountHostsMixin, QueueServiceClientBase, authenticated with a SAS token. :param credential: The credentials with which to authenticate. This is optional if the - account URL already has a SAS token. 
The value can be a SAS token string, - an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, - an account shared access key, or an instance of a TokenCredentials class from azure.identity. - If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential - - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. - If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" - should be the storage account key. + account URL already has a SAS token. The value can be a SAS token string, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. :keyword str api_version: - The Storage API version to use for requests. Default value is the most recent service version that is - compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. :keyword str secondary_hostname: The hostname of the secondary endpoint. @@ -87,7 +87,7 @@ class QueueServiceClient(AsyncStorageAccountHostsMixin, QueueServiceClientBase, def __init__( self, account_url, # type: str - credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + credential=None, # type: Optional[Any] **kwargs # type: Any ): # type: (...) -> None @@ -98,10 +98,9 @@ def __init__( credential=credential, loop=loop, **kwargs) - self._client = AzureQueueStorage(self.url, base_url=self.url, pipeline=self._pipeline, loop=loop) # type: ignore - self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._client = AzureQueueStorage(url=self.url, pipeline=self._pipeline, loop=loop) # type: ignore + self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access self._loop = loop - self._configure_encryption(kwargs) @distributed_trace_async async def get_service_stats(self, **kwargs): @@ -134,7 +133,7 @@ async def get_service_stats(self, **kwargs): stats = await self._client.service.get_statistics( # type: ignore timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) return service_stats_deserialize(stats) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -162,7 +161,7 @@ async def get_service_properties(self, **kwargs): try: service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) # type: ignore return service_properties_deserialize(service_props) - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace_async @@ -218,7 +217,7 @@ async def set_service_properties( # type: ignore ) try: return await self._client.service.set_properties(props, timeout=timeout, **kwargs) # type: ignore - except HttpResponseError as error: + except StorageErrorException as error: process_storage_error(error) @distributed_trace @@ -380,6 +379,6 @@ def get_queue_client(self, queue, **kwargs): return QueueClient( self.url, queue_name=queue_name, credential=self.credential, key_resolver_function=self.key_resolver_function, require_encryption=self.require_encryption, - encryption_version=self.encryption_version, 
key_encryption_key=self.key_encryption_key, - api_version=self.api_version, _pipeline=_pipeline, _configuration=self._config, - _location_mode=self._location_mode, _hosts=self._hosts, **kwargs) + key_encryption_key=self.key_encryption_key, api_version=self.api_version, _pipeline=_pipeline, + _configuration=self._config, _location_mode=self._location_mode, + _hosts=self._hosts, loop=self._loop, **kwargs) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_encryption.py b/azure/multiapi/storagev2/queue/v2021_02_12/_encryption.py deleted file mode 100644 index c8882f3..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_encryption.py +++ /dev/null @@ -1,979 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import os -import math -import sys -import warnings -from collections import OrderedDict -from io import BytesIO -from json import ( - dumps, - loads, -) -from typing import Any, BinaryIO, Dict, Optional, Tuple - -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives.ciphers import Cipher -from cryptography.hazmat.primitives.ciphers.aead import AESGCM -from cryptography.hazmat.primitives.ciphers.algorithms import AES -from cryptography.hazmat.primitives.ciphers.modes import CBC -from cryptography.hazmat.primitives.padding import PKCS7 - -from azure.core.exceptions import HttpResponseError - -from ._version import VERSION -from ._shared import encode_base64, decode_base64_to_bytes - - -_ENCRYPTION_PROTOCOL_V1 = '1.0' -_ENCRYPTION_PROTOCOL_V2 = '2.0' -_GCM_REGION_DATA_LENGTH = 4 * 1024 * 1024 -_GCM_NONCE_LENGTH = 12 -_GCM_TAG_LENGTH = 16 - -_ERROR_OBJECT_INVALID = \ - '{0} does not define a complete interface. Value of {1} is either missing or invalid.' - - -def _validate_not_none(param_name, param): - if param is None: - raise ValueError('{0} should not be None.'.format(param_name)) - - -def _validate_key_encryption_key_wrap(kek): - # Note that None is not callable and so will fail the second clause of each check. - if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) - if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) - - -class StorageEncryptionMixin(object): - def _configure_encryption(self, kwargs): - self.require_encryption = kwargs.get("require_encryption", False) - self.encryption_version = kwargs.get("encryption_version", "1.0") - self.key_encryption_key = kwargs.get("key_encryption_key") - self.key_resolver_function = kwargs.get("key_resolver_function") - if self.key_encryption_key and self.encryption_version == '1.0': - warnings.warn("This client has been configured to use encryption with version 1.0. " + - "Version 1.0 is deprecated and no longer considered secure. It is highly " + - "recommended that you switch to using version 2.0. 
The version can be " + - "specified using the 'encryption_version' keyword.") - - -class _EncryptionAlgorithm(object): - ''' - Specifies which client encryption algorithm is used. - ''' - AES_CBC_256 = 'AES_CBC_256' - AES_GCM_256 = 'AES_GCM_256' - - -class _WrappedContentKey: - ''' - Represents the envelope key details stored on the service. - ''' - - def __init__(self, algorithm, encrypted_key, key_id): - ''' - :param str algorithm: - The algorithm used for wrapping. - :param bytes encrypted_key: - The encrypted content-encryption-key. - :param str key_id: - The key-encryption-key identifier string. - ''' - - _validate_not_none('algorithm', algorithm) - _validate_not_none('encrypted_key', encrypted_key) - _validate_not_none('key_id', key_id) - - self.algorithm = algorithm - self.encrypted_key = encrypted_key - self.key_id = key_id - - -class _EncryptedRegionInfo: - ''' - Represents the length of encryption elements. - This is only used for Encryption V2. - ''' - - def __init__(self, data_length, nonce_length, tag_length): - ''' - :param int data_length: - The length of the encryption region data (not including nonce + tag). - :param str nonce_length: - The length of nonce used when encrypting. - :param int tag_length: - The length of the encryption tag. - ''' - _validate_not_none('data_length', data_length) - _validate_not_none('nonce_length', nonce_length) - _validate_not_none('tag_length', tag_length) - - self.data_length = data_length - self.nonce_length = nonce_length - self.tag_length = tag_length - - -class _EncryptionAgent: - ''' - Represents the encryption agent stored on the service. - It consists of the encryption protocol version and encryption algorithm used. - ''' - - def __init__(self, encryption_algorithm, protocol): - ''' - :param _EncryptionAlgorithm encryption_algorithm: - The algorithm used for encrypting the message contents. - :param str protocol: - The protocol version used for encryption. - ''' - - _validate_not_none('encryption_algorithm', encryption_algorithm) - _validate_not_none('protocol', protocol) - - self.encryption_algorithm = str(encryption_algorithm) - self.protocol = protocol - - -class _EncryptionData: - ''' - Represents the encryption data that is stored on the service. - ''' - - def __init__( - self, - content_encryption_IV, - encrypted_region_info, - encryption_agent, - wrapped_content_key, - key_wrapping_metadata): - ''' - :param Optional[bytes] content_encryption_IV: - The content encryption initialization vector. - Required for AES-CBC (V1). - :param Optional[_EncryptedRegionInfo] encrypted_region_info: - The info about the autenticated block sizes. - Required for AES-GCM (V2). - :param _EncryptionAgent encryption_agent: - The encryption agent. - :param _WrappedContentKey wrapped_content_key: - An object that stores the wrapping algorithm, the key identifier, - and the encrypted key bytes. - :param dict key_wrapping_metadata: - A dict containing metadata related to the key wrapping. 
- ''' - - _validate_not_none('encryption_agent', encryption_agent) - _validate_not_none('wrapped_content_key', wrapped_content_key) - - # Validate we have the right matching optional parameter for the specified algorithm - if encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256: - _validate_not_none('content_encryption_IV', content_encryption_IV) - elif encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_GCM_256: - _validate_not_none('encrypted_region_info', encrypted_region_info) - else: - raise ValueError("Invalid encryption algorithm.") - - self.content_encryption_IV = content_encryption_IV - self.encrypted_region_info = encrypted_region_info - self.encryption_agent = encryption_agent - self.wrapped_content_key = wrapped_content_key - self.key_wrapping_metadata = key_wrapping_metadata - - -class GCMBlobEncryptionStream: - """ - A stream that performs AES-GCM encryption on the given data as - it's streamed. Data is read and encrypted in regions. The stream - will use the same encryption key and will generate a guaranteed unique - nonce for each encryption region. - """ - def __init__( - self, - content_encryption_key: bytes, - data_stream: BinaryIO, - ): - """ - :param bytes content_encryption_key: The encryption key to use. - :param BinaryIO data_stream: The data stream to read data from. - """ - self.content_encryption_key = content_encryption_key - self.data_stream = data_stream - - self.offset = 0 - self.current = b'' - self.nonce_counter = 0 - - def read(self, size: int = -1) -> bytes: - """ - Read data from the stream. Specify -1 to read all available data. - - :param int size: The amount of data to read. Defaults to -1 for all data. - """ - result = BytesIO() - remaining = sys.maxsize if size == -1 else size - - while remaining > 0: - # Start by reading from current - if len(self.current) > 0: - read = min(remaining, len(self.current)) - result.write(self.current[:read]) - - self.current = self.current[read:] - self.offset += read - remaining -= read - - if remaining > 0: - # Read one region of data and encrypt it - data = self.data_stream.read(_GCM_REGION_DATA_LENGTH) - if len(data) == 0: - # No more data to read - break - - self.current = self._encrypt_region(data) - - return result.getvalue() - - def _encrypt_region(self, data: bytes) -> bytes: - """ - Encrypt the given region of data using AES-GCM. The result - includes the data in the form: nonce + ciphertext + tag. - - :param bytes data: The data to encrypt. - """ - # Each region MUST use a different nonce - nonce = self.nonce_counter.to_bytes(_GCM_NONCE_LENGTH, 'big') - self.nonce_counter += 1 - - aesgcm = AESGCM(self.content_encryption_key) - - # Returns ciphertext + tag - cipertext_with_tag = aesgcm.encrypt(nonce, data, None) - return nonce + cipertext_with_tag - - -def is_encryption_v2(encryption_data: Optional[_EncryptionData]) -> bool: - """ - Determine whether the given encryption data signifies version 2.0. - - :param Optional[_EncryptionData] encryption_data: The encryption data. Will return False if this is None. - """ - # If encryption_data is None, assume no encryption - return encryption_data and encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2 - - -def get_adjusted_upload_size(length: int, encryption_version: str) -> int: - """ - Get the adjusted size of the blob upload which accounts for - extra encryption data (padding OR nonce + tag). - - :param int length: The plaintext data length. - :param str encryption_version: The version of encryption being used. 
- """ - if encryption_version == _ENCRYPTION_PROTOCOL_V1: - return length + (16 - (length % 16)) - - if encryption_version == _ENCRYPTION_PROTOCOL_V2: - encryption_data_length = _GCM_NONCE_LENGTH + _GCM_TAG_LENGTH - regions = math.ceil(length / _GCM_REGION_DATA_LENGTH) - return length + (regions * encryption_data_length) - - raise ValueError("Invalid encryption version specified.") - - -def get_adjusted_download_range_and_offset( - start: int, - end: int, - length: int, - encryption_data: Optional[_EncryptionData]) -> Tuple[Tuple[int, int], Tuple[int, int]]: - """ - Gets the new download range and offsets into the decrypted data for - the given user-specified range. The new download range will include all - the data needed to decrypt the user-provided range and will include only - full encryption regions. - - The offsets returned will be the offsets needed to fetch the user-requested - data out of the full decrypted data. The end offset is different based on the - encryption version. For V1, the end offset is offset from the end whereas for - V2, the end offset is the ending index into the stream. - V1: decrypted_data[start_offset : len(decrypted_data) - end_offset] - V2: decrypted_data[start_offset : end_offset] - - :param int start: The user-requested start index. - :param int end: The user-requested end index. - :param int length: The user-requested length. Only used for V1. - :param Optional[_EncryptionData] encryption_data: The encryption data to determine version and sizes. - :return: (new start, new end), (start offset, end offset) - """ - start_offset, end_offset = 0, 0 - if encryption_data is None: - return (start, end), (start_offset, end_offset) - - if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1: - if start is not None: - # Align the start of the range along a 16 byte block - start_offset = start % 16 - start -= start_offset - - # Include an extra 16 bytes for the IV if necessary - # Because of the previous offsetting, start_range will always - # be a multiple of 16. - if start > 0: - start_offset += 16 - start -= 16 - - if length is not None: - # Align the end of the range along a 16 byte block - end_offset = 15 - (end % 16) - end += end_offset - - elif encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2: - start_offset, end_offset = 0, end - - nonce_length = encryption_data.encrypted_region_info.nonce_length - data_length = encryption_data.encrypted_region_info.data_length - tag_length = encryption_data.encrypted_region_info.tag_length - region_length = nonce_length + data_length + tag_length - requested_length = end - start - - if start is not None: - # Find which data region the start is in - region_num = start // data_length - # The start of the data region is different from the start of the encryption region - data_start = region_num * data_length - region_start = region_num * region_length - # Offset is based on data region - start_offset = start - data_start - # New start is the start of the encryption region - start = region_start - - if end is not None: - # Find which data region the end is in - region_num = end // data_length - end_offset = start_offset + requested_length + 1 - # New end is the end of the encryption region - end = (region_num * region_length) + region_length - 1 - - return (start, end), (start_offset, end_offset) - - -def parse_encryption_data(metadata: Dict[str, Any]) -> Optional[_EncryptionData]: - """ - Parses the encryption data out of the given blob metadata. 
If metadata does - not exist or there are parsing errors, this function will just return None. - - :param Dict[str, Any] metadata: The blob metadata parsed from the response. - """ - try: - return _dict_to_encryption_data(loads(metadata['encryptiondata'])) - except: # pylint: disable=bare-except - return None - - -def adjust_blob_size_for_encryption(size: int, encryption_data: Optional[_EncryptionData]) -> int: - """ - Adjusts the given blob size for encryption by subtracting the size of - the encryption data (nonce + tag). This only has an affect for encryption V2. - - :param int size: The original blob size. - :param Optional[_EncryptionData] encryption_data: The encryption data to determine version and sizes. - """ - if is_encryption_v2(encryption_data): - nonce_length = encryption_data.encrypted_region_info.nonce_length - data_length = encryption_data.encrypted_region_info.data_length - tag_length = encryption_data.encrypted_region_info.tag_length - region_length = nonce_length + data_length + tag_length - - num_regions = math.ceil(size / region_length) - metadata_size = num_regions * (nonce_length + tag_length) - return size - metadata_size - - return size - - -def _generate_encryption_data_dict(kek, cek, iv, version): - ''' - Generates and returns the encryption metadata as a dict. - - :param object kek: The key encryption key. See calling functions for more information. - :param bytes cek: The content encryption key. - :param Optional[bytes] iv: The initialization vector. Only required for AES-CBC. - :param str version: The client encryption version used. - :return: A dict containing all the encryption metadata. - :rtype: dict - ''' - # Encrypt the cek. - if version == _ENCRYPTION_PROTOCOL_V1: - wrapped_cek = kek.wrap_key(cek) - # For V2, we include the encryption version in the wrapped key. - elif version == _ENCRYPTION_PROTOCOL_V2: - # We must pad the version to 8 bytes for AES Keywrap algorithms - to_wrap = _ENCRYPTION_PROTOCOL_V2.encode().ljust(8, b'\0') + cek - wrapped_cek = kek.wrap_key(to_wrap) - - # Build the encryption_data dict. - # Use OrderedDict to comply with Java's ordering requirement. - wrapped_content_key = OrderedDict() - wrapped_content_key['KeyId'] = kek.get_kid() - wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) - wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() - - encryption_agent = OrderedDict() - encryption_agent['Protocol'] = version - - if version == _ENCRYPTION_PROTOCOL_V1: - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 - - elif version == _ENCRYPTION_PROTOCOL_V2: - encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_GCM_256 - - encrypted_region_info = OrderedDict() - encrypted_region_info['DataLength'] = _GCM_REGION_DATA_LENGTH - encrypted_region_info['NonceLength'] = _GCM_NONCE_LENGTH - - encryption_data_dict = OrderedDict() - encryption_data_dict['WrappedContentKey'] = wrapped_content_key - encryption_data_dict['EncryptionAgent'] = encryption_agent - if version == _ENCRYPTION_PROTOCOL_V1: - encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) - elif version == _ENCRYPTION_PROTOCOL_V2: - encryption_data_dict['EncryptedRegionInfo'] = encrypted_region_info - encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} - - return encryption_data_dict - - -def _dict_to_encryption_data(encryption_data_dict): - ''' - Converts the specified dictionary to an EncryptionData object for - eventual use in decryption. 
- - :param dict encryption_data_dict: - The dictionary containing the encryption data. - :return: an _EncryptionData object built from the dictionary. - :rtype: _EncryptionData - ''' - try: - protocol = encryption_data_dict['EncryptionAgent']['Protocol'] - if protocol not in [_ENCRYPTION_PROTOCOL_V1, _ENCRYPTION_PROTOCOL_V2]: - raise ValueError("Unsupported encryption version.") - except KeyError: - raise ValueError("Unsupported encryption version.") - wrapped_content_key = encryption_data_dict['WrappedContentKey'] - wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], - decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), - wrapped_content_key['KeyId']) - - encryption_agent = encryption_data_dict['EncryptionAgent'] - encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], - encryption_agent['Protocol']) - - if 'KeyWrappingMetadata' in encryption_data_dict: - key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] - else: - key_wrapping_metadata = None - - # AES-CBC only - encryption_iv = None - if 'ContentEncryptionIV' in encryption_data_dict: - encryption_iv = decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']) - - # AES-GCM only - region_info = None - if 'EncryptedRegionInfo' in encryption_data_dict: - encrypted_region_info = encryption_data_dict['EncryptedRegionInfo'] - region_info = _EncryptedRegionInfo(encrypted_region_info['DataLength'], - encrypted_region_info['NonceLength'], - _GCM_TAG_LENGTH) - - encryption_data = _EncryptionData(encryption_iv, - region_info, - encryption_agent, - wrapped_content_key, - key_wrapping_metadata) - - return encryption_data - - -def _generate_AES_CBC_cipher(cek, iv): - ''' - Generates and returns an encryption cipher for AES CBC using the given cek and iv. - - :param bytes[] cek: The content encryption key for the cipher. - :param bytes[] iv: The initialization vector for the cipher. - :return: A cipher for encrypting in AES256 CBC. - :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher - ''' - - backend = default_backend() - algorithm = AES(cek) - mode = CBC(iv) - return Cipher(algorithm, mode, backend) - - -def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): - ''' - Extracts and returns the content_encryption_key stored in the encryption_data object - and performs necessary validation on all parameters. - :param _EncryptionData encryption_data: - The encryption metadata of the retrieved value. - :param obj key_encryption_key: - The key_encryption_key used to unwrap the cek. Please refer to high-level service object - instance variables for more details. - :param func key_resolver: - A function used that, given a key_id, will return a key_encryption_key. Please refer - to high-level service object instance variables for more details. - :return: the content_encryption_key stored in the encryption_data object. 
- :rtype: bytes[] - ''' - - _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) - - # Validate we have the right info for the specified version - if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1: - _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) - elif encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2: - _validate_not_none('encrypted_region_info', encryption_data.encrypted_region_info) - else: - raise ValueError('Specified encryption version is not supported.') - - content_encryption_key = None - - # If the resolver exists, give priority to the key it finds. - if key_resolver is not None: - key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) - - _validate_not_none('key_encryption_key', key_encryption_key) - if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) - if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): - raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) - if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): - raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') - # Will throw an exception if the specified algorithm is not supported. - content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key, - encryption_data.wrapped_content_key.algorithm) - - # For V2, the version is included with the cek. We need to validate it - # and remove it from the actual cek. - if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2: - version_2_bytes = _ENCRYPTION_PROTOCOL_V2.encode().ljust(8, b'\0') - cek_version_bytes = content_encryption_key[:len(version_2_bytes)] - if cek_version_bytes != version_2_bytes: - raise ValueError('The encryption metadata is not valid and may have been modified.') - - # Remove version from the start of the cek. - content_encryption_key = content_encryption_key[len(version_2_bytes):] - - _validate_not_none('content_encryption_key', content_encryption_key) - - return content_encryption_key - - -def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None): - ''' - Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding. - Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek). - Returns the original plaintext. - - :param str message: - The ciphertext to be decrypted. - :param _EncryptionData encryption_data: - The metadata associated with this ciphertext. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The decrypted plaintext.
- :rtype: str - ''' - _validate_not_none('message', message) - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) - - if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1: - if not encryption_data.content_encryption_IV: - raise ValueError("Missing required metadata for decryption.") - - cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) - - # decrypt data - decrypted_data = message - decryptor = cipher.decryptor() - decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) - - # unpad data - unpadder = PKCS7(128).unpadder() - decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) - - elif encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2: - block_info = encryption_data.encrypted_region_info - if not block_info or not block_info.nonce_length: - raise ValueError("Missing required metadata for decryption.") - - nonce_length = encryption_data.encrypted_region_info.nonce_length - - # First bytes are the nonce - nonce = message[:nonce_length] - ciphertext_with_tag = message[nonce_length:] - - aesgcm = AESGCM(content_encryption_key) - decrypted_data = aesgcm.decrypt(nonce, ciphertext_with_tag, None) - - else: - raise ValueError('Specified encryption version is not supported.') - - return decrypted_data - - -def encrypt_blob(blob, key_encryption_key, version): - ''' - Encrypts the given blob using the given encryption protocol version. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encryption metadata. This method should - only be used when a blob is small enough for single shot upload. Encrypting larger blobs - is done as a part of the upload_data_chunks method. - - :param bytes blob: - The blob to be encrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param str version: The client encryption version to use. - :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. - :rtype: (str, bytes) - ''' - - _validate_not_none('blob', blob) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - if version == _ENCRYPTION_PROTOCOL_V1: - # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(blob) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - elif version == _ENCRYPTION_PROTOCOL_V2: - # AES256 GCM uses 256 bit (32 byte) keys and a 12 byte nonce. 
- content_encryption_key = AESGCM.generate_key(bit_length=256) - initialization_vector = None - - data = BytesIO(blob) - encryption_stream = GCMBlobEncryptionStream(content_encryption_key, data) - - encrypted_data = encryption_stream.read() - - else: - raise ValueError("Invalid encryption version specified.") - - encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key, - initialization_vector, version) - encryption_data['EncryptionMode'] = 'FullBlob' - - return dumps(encryption_data), encrypted_data - - -def generate_blob_encryption_data(key_encryption_key, version): - ''' - Generates the encryption_metadata for the blob. - - :param object key_encryption_key: - The key-encryption-key used to wrap the cek associate with this blob. - :param str version: The client encryption version to use. - :return: A tuple containing the cek and iv for this blob as well as the - serialized encryption metadata for the blob. - :rtype: (bytes, Optional[bytes], str) - ''' - encryption_data = None - content_encryption_key = None - initialization_vector = None - if key_encryption_key: - _validate_key_encryption_key_wrap(key_encryption_key) - content_encryption_key = os.urandom(32) - # Initialization vector only needed for V1 - if version == _ENCRYPTION_PROTOCOL_V1: - initialization_vector = os.urandom(16) - encryption_data = _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector, - version) - encryption_data['EncryptionMode'] = 'FullBlob' - encryption_data = dumps(encryption_data) - - return content_encryption_key, initialization_vector, encryption_data - - -def decrypt_blob( # pylint: disable=too-many-locals,too-many-statements - require_encryption, - key_encryption_key, - key_resolver, - content, - start_offset, - end_offset, - response_headers): - """ - Decrypts the given blob contents and returns only the requested range. - - :param bool require_encryption: - Whether the calling blob service requires objects to be decrypted. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param object key_resolver: - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :param bytes content: - The encrypted blob content. - :param int start_offset: - The adjusted offset from the beginning of the *decrypted* content for the caller's data. - :param int end_offset: - The adjusted offset from the end of the *decrypted* content for the caller's data. - :param Dict[str, Any] response_headers: - A dictionary of response headers from the download request. Expected to include the - 'x-ms-meta-encryptiondata' header if the blob was encrypted. - :return: The decrypted blob content. - :rtype: bytes - """ - try: - encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata'])) - except: # pylint: disable=bare-except - if require_encryption: - raise ValueError( - 'Encryption required, but received data does not contain appropriate metatadata.' 
+ \ - 'Data was either not encrypted or metadata has been lost.') - - return content - - algorithm = encryption_data.encryption_agent.encryption_algorithm - if algorithm not in(_EncryptionAlgorithm.AES_CBC_256, _EncryptionAlgorithm.AES_GCM_256): - raise ValueError('Specified encryption algorithm is not supported.') - - version = encryption_data.encryption_agent.protocol - if version not in (_ENCRYPTION_PROTOCOL_V1, _ENCRYPTION_PROTOCOL_V2): - raise ValueError('Specified encryption version is not supported.') - - content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver) - - if version == _ENCRYPTION_PROTOCOL_V1: - blob_type = response_headers['x-ms-blob-type'] - - iv = None - unpad = False - if 'content-range' in response_headers: - content_range = response_headers['content-range'] - # Format: 'bytes x-y/size' - - # Ignore the word 'bytes' - content_range = content_range.split(' ') - - content_range = content_range[1].split('-') - content_range = content_range[1].split('/') - end_range = int(content_range[0]) - blob_size = int(content_range[1]) - - if start_offset >= 16: - iv = content[:16] - content = content[16:] - start_offset -= 16 - else: - iv = encryption_data.content_encryption_IV - - if end_range == blob_size - 1: - unpad = True - else: - unpad = True - iv = encryption_data.content_encryption_IV - - if blob_type == 'PageBlob': - unpad = False - - cipher = _generate_AES_CBC_cipher(content_encryption_key, iv) - decryptor = cipher.decryptor() - - content = decryptor.update(content) + decryptor.finalize() - if unpad: - unpadder = PKCS7(128).unpadder() - content = unpadder.update(content) + unpadder.finalize() - - return content[start_offset: len(content) - end_offset] - - if version == _ENCRYPTION_PROTOCOL_V2: - # We assume the content contains only full encryption regions - total_size = len(content) - offset = 0 - - nonce_length = encryption_data.encrypted_region_info.nonce_length - data_length = encryption_data.encrypted_region_info.data_length - tag_length = encryption_data.encrypted_region_info.tag_length - region_length = nonce_length + data_length + tag_length - - decrypted_content = bytearray() - while offset < total_size: - # Process one encryption region at a time - process_size = min(region_length, total_size) - encrypted_region = content[offset:offset + process_size] - - # First bytes are the nonce - nonce = encrypted_region[:nonce_length] - ciphertext_with_tag = encrypted_region[nonce_length:] - - aesgcm = AESGCM(content_encryption_key) - decrypted_data = aesgcm.decrypt(nonce, ciphertext_with_tag, None) - decrypted_content.extend(decrypted_data) - - offset += process_size - - # Read the caller requested data from the decrypted content - return decrypted_content[start_offset:end_offset] - - -def get_blob_encryptor_and_padder(cek, iv, should_pad): - encryptor = None - padder = None - - if cek is not None and iv is not None: - cipher = _generate_AES_CBC_cipher(cek, iv) - encryptor = cipher.encryptor() - padder = PKCS7(128).padder() if should_pad else None - - return encryptor, padder - - -def encrypt_queue_message(message, key_encryption_key, version): - ''' - Encrypts the given plain text message using the given protocol version. - Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). - Returns a json-formatted string containing the encrypted message and the encryption metadata. - - :param object message: - The plain text messge to be encrypted. 
- :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - wrap_key(key)--wraps the specified key using an algorithm of the user's choice. - get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. - get_kid()--returns a string key id for this key-encryption-key. - :param str version: The client encryption version to use. - :return: A json-formatted string containing the encrypted message and the encryption metadata. - :rtype: str - ''' - - _validate_not_none('message', message) - _validate_not_none('key_encryption_key', key_encryption_key) - _validate_key_encryption_key_wrap(key_encryption_key) - - # Queue encoding functions all return unicode strings, and encryption should - # operate on binary strings. - message = message.encode('utf-8') - - if version == _ENCRYPTION_PROTOCOL_V1: - # AES256 CBC uses 256 bit (32 byte) keys and always with 16 byte blocks - content_encryption_key = os.urandom(32) - initialization_vector = os.urandom(16) - - cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) - - # PKCS7 with 16 byte blocks ensures compatibility with AES. - padder = PKCS7(128).padder() - padded_data = padder.update(message) + padder.finalize() - - # Encrypt the data. - encryptor = cipher.encryptor() - encrypted_data = encryptor.update(padded_data) + encryptor.finalize() - - elif version == _ENCRYPTION_PROTOCOL_V2: - # AES256 GCM uses 256 bit (32 byte) keys and a 12 byte nonce. - content_encryption_key = AESGCM.generate_key(bit_length=256) - initialization_vector = None - - # The nonce MUST be different for each key - nonce = os.urandom(12) - aesgcm = AESGCM(content_encryption_key) - - # Returns ciphertext + tag - cipertext_with_tag = aesgcm.encrypt(nonce, message, None) - encrypted_data = nonce + cipertext_with_tag - - else: - raise ValueError("Invalid encryption version specified.") - - # Build the dictionary structure. - queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data), - 'EncryptionData': _generate_encryption_data_dict(key_encryption_key, - content_encryption_key, - initialization_vector, - version)} - - return dumps(queue_message) - - -def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver): - ''' - Returns the decrypted message contents from an EncryptedQueueMessage. - If no encryption metadata is present, will return the unaltered message. - :param str message: - The JSON formatted QueueEncryptedMessage contents with all associated metadata. - :param bool require_encryption: - If set, will enforce that the retrieved messages are encrypted and decrypt them. - :param object key_encryption_key: - The user-provided key-encryption-key. Must implement the following methods: - unwrap_key(key, algorithm) - - returns the unwrapped form of the specified symmetric key using the string-specified algorithm. - get_kid() - - returns a string key id for this key-encryption-key. - :param function resolver(kid): - The user-provided key resolver. Uses the kid string to return a key-encryption-key - implementing the interface defined above. - :return: The plain text message from the queue message.
- :rtype: str - ''' - response = response.http_response - - try: - message = loads(message) - - encryption_data = _dict_to_encryption_data(message['EncryptionData']) - decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents']) - except (KeyError, ValueError): - # Message was not json formatted and so was not encrypted - # or the user provided a json formatted message - # or the metadata was malformed. - if require_encryption: - raise ValueError( - 'Encryption required, but received message does not contain appropriate metatadata. ' + \ - 'Message was either not encrypted or metadata was incorrect.') - - return message - try: - return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8') - except Exception as error: - raise HttpResponseError( - message="Decryption failed.", - response=response, - error=error) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/__init__.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/__init__.py deleted file mode 100644 index da9a0cd..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_queue_storage import AzureQueueStorage - -try: - from ._patch import __all__ as _patch_all - from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import -except ImportError: - _patch_all = [] -from ._patch import patch_sdk as _patch_sdk - -__all__ = ["AzureQueueStorage"] -__all__.extend([p for p in _patch_all if p not in __all__]) - -_patch_sdk() diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_azure_queue_storage.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_azure_queue_storage.py deleted file mode 100644 index fee7eaf..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_azure_queue_storage.py +++ /dev/null @@ -1,90 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any - -from azure.core import PipelineClient -from azure.core.rest import HttpRequest, HttpResponse - -from . import models -from ._configuration import AzureQueueStorageConfiguration -from ._serialization import Deserializer, Serializer -from .operations import MessageIdOperations, MessagesOperations, QueueOperations, ServiceOperations - - -class AzureQueueStorage: # pylint: disable=client-accepts-api-version-keyword - """AzureQueueStorage. 
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.queue.operations.ServiceOperations - :ivar queue: QueueOperations operations - :vartype queue: azure.storage.queue.operations.QueueOperations - :ivar messages: MessagesOperations operations - :vartype messages: azure.storage.queue.operations.MessagesOperations - :ivar message_id: MessageIdOperations operations - :vartype message_id: azure.storage.queue.operations.MessageIdOperations - :param url: The URL of the service account, queue or message that is the target of the desired - operation. Required. - :type url: str - :param base_url: Service URL. Required. Default value is "". - :type base_url: str - :keyword version: Specifies the version of the operation to use for this request. Default value - is "2018-03-28". Note that overriding this default value may result in unsupported behavior. - :paramtype version: str - """ - - def __init__( # pylint: disable=missing-client-constructor-parameter-credential - self, url: str, base_url: str = "", **kwargs: Any - ) -> None: - self._config = AzureQueueStorageConfiguration(url=url, **kwargs) - self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize) - self.queue = QueueOperations(self._client, self._config, self._serialize, self._deserialize) - self.messages = MessagesOperations(self._client, self._config, self._serialize, self._deserialize) - self.message_id = MessageIdOperations(self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - request_copy.url = self._client.format_url(request_copy.url) - return self._client.send_request(request_copy, **kwargs) - - def close(self): - # type: () -> None - self._client.close() - - def __enter__(self): - # type: () -> AzureQueueStorage - self._client.__enter__() - return self - - def __exit__(self, *exc_details): - # type: (Any) -> None - self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_configuration.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_configuration.py deleted file mode 100644 index 219d867..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_configuration.py +++ /dev/null @@ -1,55 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - - -class AzureQueueStorageConfiguration(Configuration): # pylint: disable=too-many-instance-attributes - """Configuration for AzureQueueStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, queue or message that is the target of the desired - operation. Required. - :type url: str - :keyword version: Specifies the version of the operation to use for this request. Default value - is "2018-03-28". Note that overriding this default value may result in unsupported behavior. - :paramtype version: str - """ - - def __init__(self, url: str, **kwargs: Any) -> None: - super(AzureQueueStorageConfiguration, self).__init__(**kwargs) - version = kwargs.pop("version", "2018-03-28") # type: str - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - self.url = url - self.version = version - kwargs.setdefault("sdk_moniker", "azurequeuestorage/{}".format(VERSION)) - self._configure(**kwargs) - - def _configure( - self, **kwargs # type: Any - ): - # type: (...) -> None - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_patch.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_patch.py deleted file mode 100644 index f99e77f..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_patch.py +++ /dev/null @@ -1,31 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - -# This file is used for handwritten extensions to the generated code. Example: -# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -def patch_sdk(): - pass diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_serialization.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_serialization.py deleted file mode 100644 index 648f84c..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_serialization.py +++ /dev/null @@ -1,1970 +0,0 @@ -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - -# pylint: skip-file - -from base64 import b64decode, b64encode -import calendar -import datetime -import decimal -import email -from enum import Enum -import json -import logging -import re -import sys -import codecs - -try: - from urllib import quote # type: ignore -except ImportError: - from urllib.parse import quote # type: ignore -import xml.etree.ElementTree as ET - -import isodate - -from typing import Dict, Any, cast, TYPE_CHECKING - -from azure.core.exceptions import DeserializationError, SerializationError, raise_with_traceback - -_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") - -if TYPE_CHECKING: - from typing import Optional, Union, AnyStr, IO, Mapping - - -class RawDeserializer: - - # Accept "text" because we're open minded people... - JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") - - # Name used in context - CONTEXT_NAME = "deserialized_data" - - @classmethod - def deserialize_from_text(cls, data, content_type=None): - # type: (Optional[Union[AnyStr, IO]], Optional[str]) -> Any - """Decode data according to content-type. 
- - Accept a stream of data as well, but will be load at once in memory for now. - - If no content-type, will return the string version (not bytes, not stream) - - :param data: Input, could be bytes or stream (will be decoded with UTF8) or text - :type data: str or bytes or IO - :param str content_type: The content type. - """ - if hasattr(data, "read"): - # Assume a stream - data = cast(IO, data).read() - - if isinstance(data, bytes): - data_as_str = data.decode(encoding="utf-8-sig") - else: - # Explain to mypy the correct type. - data_as_str = cast(str, data) - - # Remove Byte Order Mark if present in string - data_as_str = data_as_str.lstrip(_BOM) - - if content_type is None: - return data - - if cls.JSON_REGEXP.match(content_type): - try: - return json.loads(data_as_str) - except ValueError as err: - raise DeserializationError("JSON is invalid: {}".format(err), err) - elif "xml" in (content_type or []): - try: - - try: - if isinstance(data, unicode): # type: ignore - # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string - data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore - except NameError: - pass - - return ET.fromstring(data_as_str) # nosec - except ET.ParseError: - # It might be because the server has an issue, and returned JSON with - # content-type XML.... - # So let's try a JSON load, and if it's still broken - # let's flow the initial exception - def _json_attemp(data): - try: - return True, json.loads(data) - except ValueError: - return False, None # Don't care about this one - - success, json_result = _json_attemp(data) - if success: - return json_result - # If i'm here, it's not JSON, it's not XML, let's scream - # and raise the last context in this block (the XML exception) - # The function hack is because Py2.7 messes up with exception - # context otherwise. - _LOGGER.critical("Wasn't XML not JSON, failing") - raise_with_traceback(DeserializationError, "XML is invalid") - raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) - - @classmethod - def deserialize_from_http_generics(cls, body_bytes, headers): - # type: (Optional[Union[AnyStr, IO]], Mapping) -> Any - """Deserialize from HTTP response. - - Use bytes and headers to NOT use any requests/aiohttp or whatever - specific implementation. - Headers will tested for "content-type" - """ - # Try to use content-type from headers if available - content_type = None - if "content-type" in headers: - content_type = headers["content-type"].split(";")[0].strip().lower() - # Ouch, this server did not declare what it sent... - # Let's guess it's JSON... - # Also, since Autorest was considering that an empty body was a valid JSON, - # need that test as well.... 
- else: - content_type = "application/json" - - if body_bytes: - return cls.deserialize_from_text(body_bytes, content_type) - return None - - -try: - basestring # type: ignore - unicode_str = unicode # type: ignore -except NameError: - basestring = str # type: ignore - unicode_str = str # type: ignore - -_LOGGER = logging.getLogger(__name__) - -try: - _long_type = long # type: ignore -except NameError: - _long_type = int - - -class UTC(datetime.tzinfo): - """Time Zone info for handling UTC""" - - def utcoffset(self, dt): - """UTF offset for UTC is 0.""" - return datetime.timedelta(0) - - def tzname(self, dt): - """Timestamp representation.""" - return "Z" - - def dst(self, dt): - """No daylight saving for UTC.""" - return datetime.timedelta(hours=1) - - -try: - from datetime import timezone as _FixedOffset -except ImportError: # Python 2.7 - - class _FixedOffset(datetime.tzinfo): # type: ignore - """Fixed offset in minutes east from UTC. - Copy/pasted from Python doc - :param datetime.timedelta offset: offset in timedelta format - """ - - def __init__(self, offset): - self.__offset = offset - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return str(self.__offset.total_seconds() / 3600) - - def __repr__(self): - return "".format(self.tzname(None)) - - def dst(self, dt): - return datetime.timedelta(0) - - def __getinitargs__(self): - return (self.__offset,) - - -try: - from datetime import timezone - - TZ_UTC = timezone.utc # type: ignore -except ImportError: - TZ_UTC = UTC() # type: ignore - -_FLATTEN = re.compile(r"(? y, - "minimum": lambda x, y: x < y, - "maximum": lambda x, y: x > y, - "minimum_ex": lambda x, y: x <= y, - "maximum_ex": lambda x, y: x >= y, - "min_items": lambda x, y: len(x) < y, - "max_items": lambda x, y: len(x) > y, - "pattern": lambda x, y: not re.match(y, x, re.UNICODE), - "unique": lambda x, y: len(x) != len(set(x)), - "multiple": lambda x, y: x % y != 0, - } - - def __init__(self, classes=None): - self.serialize_type = { - "iso-8601": Serializer.serialize_iso, - "rfc-1123": Serializer.serialize_rfc, - "unix-time": Serializer.serialize_unix, - "duration": Serializer.serialize_duration, - "date": Serializer.serialize_date, - "time": Serializer.serialize_time, - "decimal": Serializer.serialize_decimal, - "long": Serializer.serialize_long, - "bytearray": Serializer.serialize_bytearray, - "base64": Serializer.serialize_base64, - "object": self.serialize_object, - "[]": self.serialize_iter, - "{}": self.serialize_dict, - } - self.dependencies = dict(classes) if classes else {} - self.key_transformer = full_restapi_key_transformer - self.client_side_validation = True - - def _serialize(self, target_obj, data_type=None, **kwargs): - """Serialize data into a string according to type. - - :param target_obj: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str, dict - :raises: SerializationError if serialization fails. 
- """ - key_transformer = kwargs.get("key_transformer", self.key_transformer) - keep_readonly = kwargs.get("keep_readonly", False) - if target_obj is None: - return None - - attr_name = None - class_name = target_obj.__class__.__name__ - - if data_type: - return self.serialize_data(target_obj, data_type, **kwargs) - - if not hasattr(target_obj, "_attribute_map"): - data_type = type(target_obj).__name__ - if data_type in self.basic_types.values(): - return self.serialize_data(target_obj, data_type, **kwargs) - - # Force "is_xml" kwargs if we detect a XML model - try: - is_xml_model_serialization = kwargs["is_xml"] - except KeyError: - is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) - - serialized = {} - if is_xml_model_serialization: - serialized = target_obj._create_xml_node() - try: - attributes = target_obj._attribute_map - for attr, attr_desc in attributes.items(): - attr_name = attr - if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): - continue - - if attr_name == "additional_properties" and attr_desc["key"] == "": - if target_obj.additional_properties is not None: - serialized.update(target_obj.additional_properties) - continue - try: - - orig_attr = getattr(target_obj, attr) - if is_xml_model_serialization: - pass # Don't provide "transformer" for XML for now. Keep "orig_attr" - else: # JSON - keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) - keys = keys if isinstance(keys, list) else [keys] - - kwargs["serialization_ctxt"] = attr_desc - new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) - - if is_xml_model_serialization: - xml_desc = attr_desc.get("xml", {}) - xml_name = xml_desc.get("name", attr_desc["key"]) - xml_prefix = xml_desc.get("prefix", None) - xml_ns = xml_desc.get("ns", None) - if xml_desc.get("attr", False): - if xml_ns: - ET.register_namespace(xml_prefix, xml_ns) - xml_name = "{}{}".format(xml_ns, xml_name) - serialized.set(xml_name, new_attr) - continue - if xml_desc.get("text", False): - serialized.text = new_attr - continue - if isinstance(new_attr, list): - serialized.extend(new_attr) - elif isinstance(new_attr, ET.Element): - # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. - if "name" not in getattr(orig_attr, "_xml_map", {}): - splitted_tag = new_attr.tag.split("}") - if len(splitted_tag) == 2: # Namespace - new_attr.tag = "}".join([splitted_tag[0], xml_name]) - else: - new_attr.tag = xml_name - serialized.append(new_attr) - else: # That's a basic type - # Integrate namespace if necessary - local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) - local_node.text = unicode_str(new_attr) - serialized.append(local_node) - else: # JSON - for k in reversed(keys): - unflattened = {k: new_attr} - new_attr = unflattened - - _new_attr = new_attr - _serialized = serialized - for k in keys: - if k not in _serialized: - _serialized.update(_new_attr) - _new_attr = _new_attr[k] - _serialized = _serialized[k] - except ValueError: - continue - - except (AttributeError, KeyError, TypeError) as err: - msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) - raise_with_traceback(SerializationError, msg, err) - else: - return serialized - - def body(self, data, data_type, **kwargs): - """Serialize data intended for a request body. - - :param data: The data to be serialized. - :param str data_type: The type to be serialized from. 
- :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None - """ - - # Just in case this is a dict - internal_data_type = data_type.strip("[]{}") - internal_data_type = self.dependencies.get(internal_data_type, None) - try: - is_xml_model_serialization = kwargs["is_xml"] - except KeyError: - if internal_data_type and issubclass(internal_data_type, Model): - is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) - else: - is_xml_model_serialization = False - if internal_data_type and not isinstance(internal_data_type, Enum): - try: - deserializer = Deserializer(self.dependencies) - # Since it's on serialization, it's almost sure that format is not JSON REST - # We're not able to deal with additional properties for now. - deserializer.additional_properties_detection = False - if is_xml_model_serialization: - deserializer.key_extractors = [ - attribute_key_case_insensitive_extractor, - ] - else: - deserializer.key_extractors = [ - rest_key_case_insensitive_extractor, - attribute_key_case_insensitive_extractor, - last_rest_key_case_insensitive_extractor, - ] - data = deserializer._deserialize(data_type, data) - except DeserializationError as err: - raise_with_traceback(SerializationError, "Unable to build a model: " + str(err), err) - - return self._serialize(data, data_type, **kwargs) - - def url(self, name, data, data_type, **kwargs): - """Serialize data intended for a URL path. - - :param data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None - """ - try: - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - - if kwargs.get("skip_quote") is True: - output = str(output) - else: - output = quote(str(output), safe="") - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return output - - def query(self, name, data, data_type, **kwargs): - """Serialize data intended for a URL query. - - :param data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None - """ - try: - # Treat the list aside, since we don't want to encode the div separator - if data_type.startswith("["): - internal_data_type = data_type[1:-1] - data = [self.serialize_data(d, internal_data_type, **kwargs) if d is not None else "" for d in data] - if not kwargs.get("skip_quote", False): - data = [quote(str(d), safe="") for d in data] - return str(self.serialize_iter(data, internal_data_type, **kwargs)) - - # Not a list, regular serialization - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - if kwargs.get("skip_quote") is True: - output = str(output) - else: - output = quote(str(output), safe="") - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return str(output) - - def header(self, name, data, data_type, **kwargs): - """Serialize data intended for a request header. - - :param data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :raises: TypeError if serialization fails. 
- :raises: ValueError if data is None - """ - try: - if data_type in ["[str]"]: - data = ["" if d is None else d for d in data] - - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - except SerializationError: - raise TypeError("{} must be type {}.".format(name, data_type)) - else: - return str(output) - - def serialize_data(self, data, data_type, **kwargs): - """Serialize generic data according to supplied data type. - - :param data: The data to be serialized. - :param str data_type: The type to be serialized from. - :param bool required: Whether it's essential that the data not be - empty or None - :raises: AttributeError if required data is None. - :raises: ValueError if data is None - :raises: SerializationError if serialization fails. - """ - if data is None: - raise ValueError("No value for given attribute") - - try: - if data_type in self.basic_types.values(): - return self.serialize_basic(data, data_type, **kwargs) - - elif data_type in self.serialize_type: - return self.serialize_type[data_type](data, **kwargs) - - # If dependencies is empty, try with current data class - # It has to be a subclass of Enum anyway - enum_type = self.dependencies.get(data_type, data.__class__) - if issubclass(enum_type, Enum): - return Serializer.serialize_enum(data, enum_obj=enum_type) - - iter_type = data_type[0] + data_type[-1] - if iter_type in self.serialize_type: - return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) - - except (ValueError, TypeError) as err: - msg = "Unable to serialize value: {!r} as type: {!r}." - raise_with_traceback(SerializationError, msg.format(data, data_type), err) - else: - return self._serialize(data, **kwargs) - - @classmethod - def _get_custom_serializers(cls, data_type, **kwargs): - custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) - if custom_serializer: - return custom_serializer - if kwargs.get("is_xml", False): - return cls._xml_basic_types_serializers.get(data_type) - - @classmethod - def serialize_basic(cls, data, data_type, **kwargs): - """Serialize basic builting data type. - Serializes objects to str, int, float or bool. - - Possible kwargs: - - basic_types_serializers dict[str, callable] : If set, use the callable as serializer - - is_xml bool : If set, use xml_basic_types_serializers - - :param data: Object to be serialized. - :param str data_type: Type of object in the iterable. - """ - custom_serializer = cls._get_custom_serializers(data_type, **kwargs) - if custom_serializer: - return custom_serializer(data) - if data_type == "str": - return cls.serialize_unicode(data) - return eval(data_type)(data) # nosec - - @classmethod - def serialize_unicode(cls, data): - """Special handling for serializing unicode strings in Py2. - Encode to UTF-8 if unicode, otherwise handle as a str. - - :param data: Object to be serialized. - :rtype: str - """ - try: # If I received an enum, return its value - return data.value - except AttributeError: - pass - - try: - if isinstance(data, unicode): - # Don't change it, JSON and XML ElementTree are totally able - # to serialize correctly u'' strings - return data - except NameError: - return str(data) - else: - return str(data) - - def serialize_iter(self, data, iter_type, div=None, **kwargs): - """Serialize iterable. - - Supported kwargs: - - serialization_ctxt dict : The current entry of _attribute_map, or same format. - serialization_ctxt['type'] should be same as data_type. 
- - is_xml bool : If set, serialize as XML - - :param list attr: Object to be serialized. - :param str iter_type: Type of object in the iterable. - :param bool required: Whether the objects in the iterable must - not be None or empty. - :param str div: If set, this str will be used to combine the elements - in the iterable into a combined string. Default is 'None'. - :rtype: list, str - """ - if isinstance(data, str): - raise SerializationError("Refuse str type as a valid iter type.") - - serialization_ctxt = kwargs.get("serialization_ctxt", {}) - is_xml = kwargs.get("is_xml", False) - - serialized = [] - for d in data: - try: - serialized.append(self.serialize_data(d, iter_type, **kwargs)) - except ValueError: - serialized.append(None) - - if div: - serialized = ["" if s is None else str(s) for s in serialized] - serialized = div.join(serialized) - - if "xml" in serialization_ctxt or is_xml: - # XML serialization is more complicated - xml_desc = serialization_ctxt.get("xml", {}) - xml_name = xml_desc.get("name") - if not xml_name: - xml_name = serialization_ctxt["key"] - - # Create a wrap node if necessary (use the fact that Element and list have "append") - is_wrapped = xml_desc.get("wrapped", False) - node_name = xml_desc.get("itemsName", xml_name) - if is_wrapped: - final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - else: - final_result = [] - # All list elements to "local_node" - for el in serialized: - if isinstance(el, ET.Element): - el_node = el - else: - el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - if el is not None: # Otherwise it writes "None" :-p - el_node.text = str(el) - final_result.append(el_node) - return final_result - return serialized - - def serialize_dict(self, attr, dict_type, **kwargs): - """Serialize a dictionary of objects. - - :param dict attr: Object to be serialized. - :param str dict_type: Type of object in the dictionary. - :param bool required: Whether the objects in the dictionary must - not be None or empty. - :rtype: dict - """ - serialization_ctxt = kwargs.get("serialization_ctxt", {}) - serialized = {} - for key, value in attr.items(): - try: - serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) - except ValueError: - serialized[self.serialize_unicode(key)] = None - - if "xml" in serialization_ctxt: - # XML serialization is more complicated - xml_desc = serialization_ctxt["xml"] - xml_name = xml_desc["name"] - - final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - for key, value in serialized.items(): - ET.SubElement(final_result, key).text = value - return final_result - - return serialized - - def serialize_object(self, attr, **kwargs): - """Serialize a generic object. - This will be handled as a dictionary. If object passed in is not - a basic type (str, int, float, dict, list) it will simply be - cast to str. - - :param dict attr: Object to be serialized. 
- :rtype: dict or str - """ - if attr is None: - return None - if isinstance(attr, ET.Element): - return attr - obj_type = type(attr) - if obj_type in self.basic_types: - return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) - if obj_type is _long_type: - return self.serialize_long(attr) - if obj_type is unicode_str: - return self.serialize_unicode(attr) - if obj_type is datetime.datetime: - return self.serialize_iso(attr) - if obj_type is datetime.date: - return self.serialize_date(attr) - if obj_type is datetime.time: - return self.serialize_time(attr) - if obj_type is datetime.timedelta: - return self.serialize_duration(attr) - if obj_type is decimal.Decimal: - return self.serialize_decimal(attr) - - # If it's a model or I know this dependency, serialize as a Model - elif obj_type in self.dependencies.values() or isinstance(attr, Model): - return self._serialize(attr) - - if obj_type == dict: - serialized = {} - for key, value in attr.items(): - try: - serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) - except ValueError: - serialized[self.serialize_unicode(key)] = None - return serialized - - if obj_type == list: - serialized = [] - for obj in attr: - try: - serialized.append(self.serialize_object(obj, **kwargs)) - except ValueError: - pass - return serialized - return str(attr) - - @staticmethod - def serialize_enum(attr, enum_obj=None): - try: - result = attr.value - except AttributeError: - result = attr - try: - enum_obj(result) - return result - except ValueError: - for enum_value in enum_obj: - if enum_value.value.lower() == str(attr).lower(): - return enum_value.value - error = "{!r} is not valid value for enum {!r}" - raise SerializationError(error.format(attr, enum_obj)) - - @staticmethod - def serialize_bytearray(attr, **kwargs): - """Serialize bytearray into base-64 string. - - :param attr: Object to be serialized. - :rtype: str - """ - return b64encode(attr).decode() - - @staticmethod - def serialize_base64(attr, **kwargs): - """Serialize str into base-64 string. - - :param attr: Object to be serialized. - :rtype: str - """ - encoded = b64encode(attr).decode("ascii") - return encoded.strip("=").replace("+", "-").replace("/", "_") - - @staticmethod - def serialize_decimal(attr, **kwargs): - """Serialize Decimal object to float. - - :param attr: Object to be serialized. - :rtype: float - """ - return float(attr) - - @staticmethod - def serialize_long(attr, **kwargs): - """Serialize long (Py2) or int (Py3). - - :param attr: Object to be serialized. - :rtype: int/long - """ - return _long_type(attr) - - @staticmethod - def serialize_date(attr, **kwargs): - """Serialize Date object into ISO-8601 formatted string. - - :param Date attr: Object to be serialized. - :rtype: str - """ - if isinstance(attr, str): - attr = isodate.parse_date(attr) - t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) - return t - - @staticmethod - def serialize_time(attr, **kwargs): - """Serialize Time object into ISO-8601 formatted string. - - :param datetime.time attr: Object to be serialized. - :rtype: str - """ - if isinstance(attr, str): - attr = isodate.parse_time(attr) - t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) - if attr.microsecond: - t += ".{:02}".format(attr.microsecond) - return t - - @staticmethod - def serialize_duration(attr, **kwargs): - """Serialize TimeDelta object into ISO-8601 formatted string. - - :param TimeDelta attr: Object to be serialized. 
- :rtype: str - """ - if isinstance(attr, str): - attr = isodate.parse_duration(attr) - return isodate.duration_isoformat(attr) - - @staticmethod - def serialize_rfc(attr, **kwargs): - """Serialize Datetime object into RFC-1123 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: TypeError if format invalid. - """ - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - utc = attr.utctimetuple() - except AttributeError: - raise TypeError("RFC1123 object must be valid Datetime object.") - - return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( - Serializer.days[utc.tm_wday], - utc.tm_mday, - Serializer.months[utc.tm_mon], - utc.tm_year, - utc.tm_hour, - utc.tm_min, - utc.tm_sec, - ) - - @staticmethod - def serialize_iso(attr, **kwargs): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: SerializationError if format invalid. - """ - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") - if microseconds: - microseconds = "." + microseconds - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec - ) - return date + microseconds + "Z" - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(SerializationError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - @staticmethod - def serialize_unix(attr, **kwargs): - """Serialize Datetime object into IntTime format. - This is represented as seconds. - - :param Datetime attr: Object to be serialized. - :rtype: int - :raises: SerializationError if format invalid - """ - if isinstance(attr, int): - return attr - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - return int(calendar.timegm(attr.utctimetuple())) - except AttributeError: - raise TypeError("Unix time object must be valid Datetime object.") - - -def rest_key_extractor(attr, attr_desc, data): - key = attr_desc["key"] - working_data = data - - while "." in key: - dict_keys = _FLATTEN.split(key) - if len(dict_keys) == 1: - key = _decode_attribute_map_key(dict_keys[0]) - break - working_key = _decode_attribute_map_key(dict_keys[0]) - working_data = working_data.get(working_key, data) - if working_data is None: - # If at any point while following flatten JSON path see None, it means - # that all properties under are None as well - # https://github.com/Azure/msrest-for-python/issues/197 - return None - key = ".".join(dict_keys[1:]) - - return working_data.get(key) - - -def rest_key_case_insensitive_extractor(attr, attr_desc, data): - key = attr_desc["key"] - working_data = data - - while "." 
in key: - dict_keys = _FLATTEN.split(key) - if len(dict_keys) == 1: - key = _decode_attribute_map_key(dict_keys[0]) - break - working_key = _decode_attribute_map_key(dict_keys[0]) - working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) - if working_data is None: - # If at any point while following flatten JSON path see None, it means - # that all properties under are None as well - # https://github.com/Azure/msrest-for-python/issues/197 - return None - key = ".".join(dict_keys[1:]) - - if working_data: - return attribute_key_case_insensitive_extractor(key, None, working_data) - - -def last_rest_key_extractor(attr, attr_desc, data): - """Extract the attribute in "data" based on the last part of the JSON path key.""" - key = attr_desc["key"] - dict_keys = _FLATTEN.split(key) - return attribute_key_extractor(dict_keys[-1], None, data) - - -def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): - """Extract the attribute in "data" based on the last part of the JSON path key. - - This is the case insensitive version of "last_rest_key_extractor" - """ - key = attr_desc["key"] - dict_keys = _FLATTEN.split(key) - return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) - - -def attribute_key_extractor(attr, _, data): - return data.get(attr) - - -def attribute_key_case_insensitive_extractor(attr, _, data): - found_key = None - lower_attr = attr.lower() - for key in data: - if lower_attr == key.lower(): - found_key = key - break - - return data.get(found_key) - - -def _extract_name_from_internal_type(internal_type): - """Given an internal type XML description, extract correct XML name with namespace. - - :param dict internal_type: An model type - :rtype: tuple - :returns: A tuple XML name + namespace dict - """ - internal_type_xml_map = getattr(internal_type, "_xml_map", {}) - xml_name = internal_type_xml_map.get("name", internal_type.__name__) - xml_ns = internal_type_xml_map.get("ns", None) - if xml_ns: - xml_name = "{}{}".format(xml_ns, xml_name) - return xml_name - - -def xml_key_extractor(attr, attr_desc, data): - if isinstance(data, dict): - return None - - # Test if this model is XML ready first - if not isinstance(data, ET.Element): - return None - - xml_desc = attr_desc.get("xml", {}) - xml_name = xml_desc.get("name", attr_desc["key"]) - - # Look for a children - is_iter_type = attr_desc["type"].startswith("[") - is_wrapped = xml_desc.get("wrapped", False) - internal_type = attr_desc.get("internalType", None) - internal_type_xml_map = getattr(internal_type, "_xml_map", {}) - - # Integrate namespace if necessary - xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) - if xml_ns: - xml_name = "{}{}".format(xml_ns, xml_name) - - # If it's an attribute, that's simple - if xml_desc.get("attr", False): - return data.get(xml_name) - - # If it's x-ms-text, that's simple too - if xml_desc.get("text", False): - return data.text - - # Scenario where I take the local name: - # - Wrapped node - # - Internal type is an enum (considered basic types) - # - Internal type has no XML/Name node - if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): - children = data.findall(xml_name) - # If internal type has a local name and it's not a list, I use that name - elif not is_iter_type and internal_type and "name" in internal_type_xml_map: - xml_name = _extract_name_from_internal_type(internal_type) - children = data.findall(xml_name) - # That's an array - else: - if 
internal_type: # Complex type, ignore itemsName and use the complex type name - items_name = _extract_name_from_internal_type(internal_type) - else: - items_name = xml_desc.get("itemsName", xml_name) - children = data.findall(items_name) - - if len(children) == 0: - if is_iter_type: - if is_wrapped: - return None # is_wrapped no node, we want None - else: - return [] # not wrapped, assume empty list - return None # Assume it's not there, maybe an optional node. - - # If is_iter_type and not wrapped, return all found children - if is_iter_type: - if not is_wrapped: - return children - else: # Iter and wrapped, should have found one node only (the wrap one) - if len(children) != 1: - raise DeserializationError( - "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( - xml_name - ) - ) - return list(children[0]) # Might be empty list and that's ok. - - # Here it's not a itertype, we should have found one element only or empty - if len(children) > 1: - raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) - return children[0] - - -class Deserializer(object): - """Response object model deserializer. - - :param dict classes: Class type dictionary for deserializing complex types. - :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. - """ - - basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - - valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") - - def __init__(self, classes=None): - self.deserialize_type = { - "iso-8601": Deserializer.deserialize_iso, - "rfc-1123": Deserializer.deserialize_rfc, - "unix-time": Deserializer.deserialize_unix, - "duration": Deserializer.deserialize_duration, - "date": Deserializer.deserialize_date, - "time": Deserializer.deserialize_time, - "decimal": Deserializer.deserialize_decimal, - "long": Deserializer.deserialize_long, - "bytearray": Deserializer.deserialize_bytearray, - "base64": Deserializer.deserialize_base64, - "object": self.deserialize_object, - "[]": self.deserialize_iter, - "{}": self.deserialize_dict, - } - self.deserialize_expected_types = { - "duration": (isodate.Duration, datetime.timedelta), - "iso-8601": (datetime.datetime), - } - self.dependencies = dict(classes) if classes else {} - self.key_extractors = [rest_key_extractor, xml_key_extractor] - # Additional properties only works if the "rest_key_extractor" is used to - # extract the keys. Making it to work whatever the key extractor is too much - # complicated, with no real scenario for now. - # So adding a flag to disable additional properties detection. This flag should be - # used if your expect the deserialization to NOT come from a JSON REST syntax. - # Otherwise, result are unexpected - self.additional_properties_detection = True - - def __call__(self, target_obj, response_data, content_type=None): - """Call the deserializer to process a REST response. - - :param str target_obj: Target data type to deserialize to. - :param requests.Response response_data: REST response object. - :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - """ - data = self._unpack_content(response_data, content_type) - return self._deserialize(target_obj, data) - - def _deserialize(self, target_obj, data): - """Call the deserializer on a model. 
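The REST key extractors above walk flattened JSON paths such as ``properties.sku.name``; a simplified standalone version, without the escaped-dot handling of ``_FLATTEN``, looks like this::

    def extract_flattened(data, key):
        # Follow each dotted segment; a None along the way means every
        # property underneath it is None as well.
        parts = key.split(".")
        for part in parts[:-1]:
            data = data.get(part, data)
            if data is None:
                return None
        return data.get(parts[-1])

    payload = {"properties": {"sku": {"name": "P1"}}}
    print(extract_flattened(payload, "properties.sku.name"))  # P1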
- - Data needs to be already deserialized as JSON or XML ElementTree - - :param str target_obj: Target data type to deserialize to. - :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - """ - # This is already a model, go recursive just in case - if hasattr(data, "_attribute_map"): - constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] - try: - for attr, mapconfig in data._attribute_map.items(): - if attr in constants: - continue - value = getattr(data, attr) - if value is None: - continue - local_type = mapconfig["type"] - internal_data_type = local_type.strip("[]{}") - if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): - continue - setattr(data, attr, self._deserialize(local_type, value)) - return data - except AttributeError: - return - - response, class_name = self._classify_target(target_obj, data) - - if isinstance(response, basestring): - return self.deserialize_data(data, response) - elif isinstance(response, type) and issubclass(response, Enum): - return self.deserialize_enum(data, response) - - if data is None: - return data - try: - attributes = response._attribute_map - d_attrs = {} - for attr, attr_desc in attributes.items(): - # Check empty string. If it's not empty, someone has a real "additionalProperties"... - if attr == "additional_properties" and attr_desc["key"] == "": - continue - raw_value = None - # Enhance attr_desc with some dynamic data - attr_desc = attr_desc.copy() # Do a copy, do not change the real one - internal_data_type = attr_desc["type"].strip("[]{}") - if internal_data_type in self.dependencies: - attr_desc["internalType"] = self.dependencies[internal_data_type] - - for key_extractor in self.key_extractors: - found_value = key_extractor(attr, attr_desc, data) - if found_value is not None: - if raw_value is not None and raw_value != found_value: - msg = ( - "Ignoring extracted value '%s' from %s for key '%s'" - " (duplicate extraction, follow extractors order)" - ) - _LOGGER.warning(msg, found_value, key_extractor, attr) - continue - raw_value = found_value - - value = self.deserialize_data(raw_value, attr_desc["type"]) - d_attrs[attr] = value - except (AttributeError, TypeError, KeyError) as err: - msg = "Unable to deserialize to object: " + class_name - raise_with_traceback(DeserializationError, msg, err) - else: - additional_properties = self._build_additional_properties(attributes, data) - return self._instantiate_model(response, d_attrs, additional_properties) - - def _build_additional_properties(self, attribute_map, data): - if not self.additional_properties_detection: - return None - if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": - # Check empty string. If it's not empty, someone has a real "additionalProperties" - return None - if isinstance(data, ET.Element): - data = {el.tag: el.text for el in data} - - known_keys = { - _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) - for desc in attribute_map.values() - if desc["key"] != "" - } - present_keys = set(data.keys()) - missing_keys = present_keys - known_keys - return {key: data[key] for key in missing_keys} - - def _classify_target(self, target, data): - """Check to see whether the deserialization target object can - be classified into a subclass. - Once classification has been determined, initialize object. 
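The additional-properties detection above boils down to a set difference between the keys present in the payload and the keys declared in the attribute map; a condensed sketch, using a made-up attribute map::

    def find_additional_properties(attribute_map, data):
        # Anything in the payload that no declared attribute claims is
        # treated as an "additional property".
        known = {desc["key"].split(".")[0] for desc in attribute_map.values() if desc["key"]}
        return {k: v for k, v in data.items() if k not in known}

    attr_map = {"name": {"key": "Name", "type": "str"}}
    print(find_additional_properties(attr_map, {"Name": "q1", "Color": "blue"}))
    # {'Color': 'blue'}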
- - :param str target: The target object type to deserialize to. - :param str/dict data: The response data to deseralize. - """ - if target is None: - return None, None - - if isinstance(target, basestring): - try: - target = self.dependencies[target] - except KeyError: - return target, target - - try: - target = target._classify(data, self.dependencies) - except AttributeError: - pass # Target is not a Model, no classify - return target, target.__class__.__name__ - - def failsafe_deserialize(self, target_obj, data, content_type=None): - """Ignores any errors encountered in deserialization, - and falls back to not deserializing the object. Recommended - for use in error deserialization, as we want to return the - HttpResponseError to users, and not have them deal with - a deserialization error. - - :param str target_obj: The target object type to deserialize to. - :param str/dict data: The response data to deseralize. - :param str content_type: Swagger "produces" if available. - """ - try: - return self(target_obj, data, content_type=content_type) - except: - _LOGGER.warning( - "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True - ) - return None - - @staticmethod - def _unpack_content(raw_data, content_type=None): - """Extract the correct structure for deserialization. - - If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. - if we can't, raise. Your Pipeline should have a RawDeserializer. - - If not a pipeline response and raw_data is bytes or string, use content-type - to decode it. If no content-type, try JSON. - - If raw_data is something else, bypass all logic and return it directly. - - :param raw_data: Data to be processed. - :param content_type: How to parse if raw_data is a string/bytes. - :raises JSONDecodeError: If JSON is requested and parsing is impossible. - :raises UnicodeDecodeError: If bytes is not UTF8 - """ - # Assume this is enough to detect a Pipeline Response without importing it - context = getattr(raw_data, "context", {}) - if context: - if RawDeserializer.CONTEXT_NAME in context: - return context[RawDeserializer.CONTEXT_NAME] - raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") - - # Assume this is enough to recognize universal_http.ClientResponse without importing it - if hasattr(raw_data, "body"): - return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) - - # Assume this enough to recognize requests.Response without importing it. - if hasattr(raw_data, "_content_consumed"): - return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) - - if isinstance(raw_data, (basestring, bytes)) or hasattr(raw_data, "read"): - return RawDeserializer.deserialize_from_text(raw_data, content_type) - return raw_data - - def _instantiate_model(self, response, attrs, additional_properties=None): - """Instantiate a response model passing in deserialized args. - - :param response: The response model class. - :param d_attrs: The deserialized response attributes. 
- """ - if callable(response): - subtype = getattr(response, "_subtype_map", {}) - try: - readonly = [k for k, v in response._validation.items() if v.get("readonly")] - const = [k for k, v in response._validation.items() if v.get("constant")] - kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} - response_obj = response(**kwargs) - for attr in readonly: - setattr(response_obj, attr, attrs.get(attr)) - if additional_properties: - response_obj.additional_properties = additional_properties - return response_obj - except TypeError as err: - msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) - raise DeserializationError(msg + str(err)) - else: - try: - for attr, value in attrs.items(): - setattr(response, attr, value) - return response - except Exception as exp: - msg = "Unable to populate response model. " - msg += "Type: {}, Error: {}".format(type(response), exp) - raise DeserializationError(msg) - - def deserialize_data(self, data, data_type): - """Process data for deserialization according to data type. - - :param str data: The response string to be deserialized. - :param str data_type: The type to deserialize to. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - """ - if data is None: - return data - - try: - if not data_type: - return data - if data_type in self.basic_types.values(): - return self.deserialize_basic(data, data_type) - if data_type in self.deserialize_type: - if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): - return data - - is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] - if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: - return None - data_val = self.deserialize_type[data_type](data) - return data_val - - iter_type = data_type[0] + data_type[-1] - if iter_type in self.deserialize_type: - return self.deserialize_type[iter_type](data, data_type[1:-1]) - - obj_type = self.dependencies[data_type] - if issubclass(obj_type, Enum): - if isinstance(data, ET.Element): - data = data.text - return self.deserialize_enum(data, obj_type) - - except (ValueError, TypeError, AttributeError) as err: - msg = "Unable to deserialize response data." - msg += " Data: {}, {}".format(data, data_type) - raise_with_traceback(DeserializationError, msg, err) - else: - return self._deserialize(obj_type, data) - - def deserialize_iter(self, attr, iter_type): - """Deserialize an iterable. - - :param list attr: Iterable to be deserialized. - :param str iter_type: The type of object in the iterable. - :rtype: list - """ - if attr is None: - return None - if isinstance(attr, ET.Element): # If I receive an element here, get the children - attr = list(attr) - if not isinstance(attr, (list, set)): - raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) - return [self.deserialize_data(a, iter_type) for a in attr] - - def deserialize_dict(self, attr, dict_type): - """Deserialize a dictionary. - - :param dict/list attr: Dictionary to be deserialized. Also accepts - a list of key, value pairs. - :param str dict_type: The object type of the items in the dictionary. 
- :rtype: dict - """ - if isinstance(attr, list): - return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} - - if isinstance(attr, ET.Element): - # Transform value into {"Key": "value"} - attr = {el.tag: el.text for el in attr} - return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} - - def deserialize_object(self, attr, **kwargs): - """Deserialize a generic object. - This will be handled as a dictionary. - - :param dict attr: Dictionary to be deserialized. - :rtype: dict - :raises: TypeError if non-builtin datatype encountered. - """ - if attr is None: - return None - if isinstance(attr, ET.Element): - # Do no recurse on XML, just return the tree as-is - return attr - if isinstance(attr, basestring): - return self.deserialize_basic(attr, "str") - obj_type = type(attr) - if obj_type in self.basic_types: - return self.deserialize_basic(attr, self.basic_types[obj_type]) - if obj_type is _long_type: - return self.deserialize_long(attr) - - if obj_type == dict: - deserialized = {} - for key, value in attr.items(): - try: - deserialized[key] = self.deserialize_object(value, **kwargs) - except ValueError: - deserialized[key] = None - return deserialized - - if obj_type == list: - deserialized = [] - for obj in attr: - try: - deserialized.append(self.deserialize_object(obj, **kwargs)) - except ValueError: - pass - return deserialized - - else: - error = "Cannot deserialize generic object with type: " - raise TypeError(error + str(obj_type)) - - def deserialize_basic(self, attr, data_type): - """Deserialize basic builtin data type from string. - Will attempt to convert to str, int, float and bool. - This function will also accept '1', '0', 'true' and 'false' as - valid bool values. - - :param str attr: response string to be deserialized. - :param str data_type: deserialization data type. - :rtype: str, int, float or bool - :raises: TypeError if string format is not valid. - """ - # If we're here, data is supposed to be a basic type. - # If it's still an XML node, take the text - if isinstance(attr, ET.Element): - attr = attr.text - if not attr: - if data_type == "str": - # None or '', node is empty string. - return "" - else: - # None or '', node with a strong type is None. - # Don't try to model "empty bool" or "empty int" - return None - - if data_type == "bool": - if attr in [True, False, 1, 0]: - return bool(attr) - elif isinstance(attr, basestring): - if attr.lower() in ["true", "1"]: - return True - elif attr.lower() in ["false", "0"]: - return False - raise TypeError("Invalid boolean value: {}".format(attr)) - - if data_type == "str": - return self.deserialize_unicode(attr) - return eval(data_type)(attr) # nosec - - @staticmethod - def deserialize_unicode(data): - """Preserve unicode objects in Python 2, otherwise return data - as a string. - - :param str data: response string to be deserialized. - :rtype: str or unicode - """ - # We might be here because we have an enum modeled as string, - # and we try to deserialize a partial dict with enum inside - if isinstance(data, Enum): - return data - - # Consider this is real string - try: - if isinstance(data, unicode): - return data - except NameError: - return str(data) - else: - return str(data) - - @staticmethod - def deserialize_enum(data, enum_obj): - """Deserialize string into enum object. - - If the string is not a valid enum value it will be returned as-is - and a warning will be logged. - - :param str data: Response string to be deserialized. 
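``deserialize_basic`` above accepts several spellings of a boolean; restated as a standalone helper (the name is illustrative)::

    def parse_bool(value):
        # Accept the same spellings the deserializer tolerates: real bools,
        # 0/1, and the strings 'true'/'false'/'1'/'0' in any case.
        if value in (True, False, 1, 0):
            return bool(value)
        if isinstance(value, str):
            if value.lower() in ("true", "1"):
                return True
            if value.lower() in ("false", "0"):
                return False
        raise TypeError("Invalid boolean value: {}".format(value))

    assert parse_bool("TRUE") is True and parse_bool(0) is False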
If this value is - None or invalid it will be returned as-is. - :param Enum enum_obj: Enum object to deserialize to. - :rtype: Enum - """ - if isinstance(data, enum_obj) or data is None: - return data - if isinstance(data, Enum): - data = data.value - if isinstance(data, int): - # Workaround. We might consider remove it in the future. - # https://github.com/Azure/azure-rest-api-specs/issues/141 - try: - return list(enum_obj.__members__.values())[data] - except IndexError: - error = "{!r} is not a valid index for enum {!r}" - raise DeserializationError(error.format(data, enum_obj)) - try: - return enum_obj(str(data)) - except ValueError: - for enum_value in enum_obj: - if enum_value.value.lower() == str(data).lower(): - return enum_value - # We don't fail anymore for unknown value, we deserialize as a string - _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) - return Deserializer.deserialize_unicode(data) - - @staticmethod - def deserialize_bytearray(attr): - """Deserialize string into bytearray. - - :param str attr: response string to be deserialized. - :rtype: bytearray - :raises: TypeError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - return bytearray(b64decode(attr)) - - @staticmethod - def deserialize_base64(attr): - """Deserialize base64 encoded string into string. - - :param str attr: response string to be deserialized. - :rtype: bytearray - :raises: TypeError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - padding = "=" * (3 - (len(attr) + 3) % 4) - attr = attr + padding - encoded = attr.replace("-", "+").replace("_", "/") - return b64decode(encoded) - - @staticmethod - def deserialize_decimal(attr): - """Deserialize string into Decimal object. - - :param str attr: response string to be deserialized. - :rtype: Decimal - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - return decimal.Decimal(attr) - except decimal.DecimalException as err: - msg = "Invalid decimal {}".format(attr) - raise_with_traceback(DeserializationError, msg, err) - - @staticmethod - def deserialize_long(attr): - """Deserialize string into long (Py2) or int (Py3). - - :param str attr: response string to be deserialized. - :rtype: long or int - :raises: ValueError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - return _long_type(attr) - - @staticmethod - def deserialize_duration(attr): - """Deserialize ISO-8601 formatted string into TimeDelta object. - - :param str attr: response string to be deserialized. - :rtype: TimeDelta - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - duration = isodate.parse_duration(attr) - except (ValueError, OverflowError, AttributeError) as err: - msg = "Cannot deserialize duration object." - raise_with_traceback(DeserializationError, msg, err) - else: - return duration - - @staticmethod - def deserialize_date(attr): - """Deserialize ISO-8601 formatted string into Date object. - - :param str attr: response string to be deserialized. - :rtype: Date - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - if re.search(r"[^\W\d_]", attr, re.I + re.U): - raise DeserializationError("Date must have only digits and -. Received: %s" % attr) - # This must NOT use defaultmonth/defaultday. 
Using None ensure this raises an exception. - return isodate.parse_date(attr, defaultmonth=None, defaultday=None) - - @staticmethod - def deserialize_time(attr): - """Deserialize ISO-8601 formatted string into time object. - - :param str attr: response string to be deserialized. - :rtype: datetime.time - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - if re.search(r"[^\W\d_]", attr, re.I + re.U): - raise DeserializationError("Date must have only digits and -. Received: %s" % attr) - return isodate.parse_time(attr) - - @staticmethod - def deserialize_rfc(attr): - """Deserialize RFC-1123 formatted string into Datetime object. - - :param str attr: response string to be deserialized. - :rtype: Datetime - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - parsed_date = email.utils.parsedate_tz(attr) - date_obj = datetime.datetime( - *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) - ) - if not date_obj.tzinfo: - date_obj = date_obj.astimezone(tz=TZ_UTC) - except ValueError as err: - msg = "Cannot deserialize to rfc datetime object." - raise_with_traceback(DeserializationError, msg, err) - else: - return date_obj - - @staticmethod - def deserialize_iso(attr): - """Deserialize ISO-8601 formatted string into Datetime object. - - :param str attr: response string to be deserialized. - :rtype: Datetime - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - attr = attr.upper() - match = Deserializer.valid_date.match(attr) - if not match: - raise ValueError("Invalid datetime string: " + attr) - - check_decimal = attr.split(".") - if len(check_decimal) > 1: - decimal_str = "" - for digit in check_decimal[1]: - if digit.isdigit(): - decimal_str += digit - else: - break - if len(decimal_str) > 6: - attr = attr.replace(decimal_str, decimal_str[0:6]) - - date_obj = isodate.parse_datetime(attr) - test_utc = date_obj.utctimetuple() - if test_utc.tm_year > 9999 or test_utc.tm_year < 1: - raise OverflowError("Hit max or min date") - except (ValueError, OverflowError, AttributeError) as err: - msg = "Cannot deserialize datetime object." - raise_with_traceback(DeserializationError, msg, err) - else: - return date_obj - - @staticmethod - def deserialize_unix(attr): - """Serialize Datetime object into IntTime format. - This is represented as seconds. - - :param int attr: Object to be serialized. - :rtype: Datetime - :raises: DeserializationError if format invalid - """ - if isinstance(attr, ET.Element): - attr = int(attr.text) - try: - date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) - except ValueError as err: - msg = "Cannot deserialize to unix datetime object." - raise_with_traceback(DeserializationError, msg, err) - else: - return date_obj diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_vendor.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_vendor.py deleted file mode 100644 index 9aad73f..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/_vendor.py +++ /dev/null @@ -1,27 +0,0 @@ -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. 
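``deserialize_iso`` above trims fractional seconds to the six digits ``datetime`` can represent before parsing; the same trimming as a standalone helper::

    def truncate_fractional_seconds(timestamp: str) -> str:
        # Keep at most six fractional digits so the value fits datetime's
        # microsecond precision; anything after the digits is preserved.
        date_part, sep, rest = timestamp.partition(".")
        if not sep:
            return timestamp
        digits = ""
        for ch in rest:
            if not ch.isdigit():
                break
            digits += ch
        if len(digits) <= 6:
            return timestamp
        return date_part + "." + digits[:6] + rest[len(digits):]

    print(truncate_fractional_seconds("2021-08-06T01:02:03.1234567890Z"))
    # 2021-08-06T01:02:03.123456Z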
-# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from azure.core.pipeline.transport import HttpRequest - - -def _convert_request(request, files=None): - data = request.content if not files else None - request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) - if files: - request.set_formdata_body(files) - return request - - -def _format_url_section(template, **kwargs): - components = template.split("/") - while components: - try: - return template.format(**kwargs) - except KeyError as key: - formatted_components = template.split("/") - components = [c for c in formatted_components if "{}".format(key.args[0]) not in c] - template = "/".join(components) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/__init__.py deleted file mode 100644 index da9a0cd..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._azure_queue_storage import AzureQueueStorage - -try: - from ._patch import __all__ as _patch_all - from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import -except ImportError: - _patch_all = [] -from ._patch import patch_sdk as _patch_sdk - -__all__ = ["AzureQueueStorage"] -__all__.extend([p for p in _patch_all if p not in __all__]) - -_patch_sdk() diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_azure_queue_storage.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_azure_queue_storage.py deleted file mode 100644 index 0015b47..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_azure_queue_storage.py +++ /dev/null @@ -1,87 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, Awaitable - -from azure.core import AsyncPipelineClient -from azure.core.rest import AsyncHttpResponse, HttpRequest - -from .. import models -from .._serialization import Deserializer, Serializer -from ._configuration import AzureQueueStorageConfiguration -from .operations import MessageIdOperations, MessagesOperations, QueueOperations, ServiceOperations - - -class AzureQueueStorage: # pylint: disable=client-accepts-api-version-keyword - """AzureQueueStorage. 
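``_format_url_section`` above keeps re-running ``str.format`` on the URL template, dropping any path component whose parameter was not supplied; roughly equivalent standalone code (the sample template and account URL are placeholders)::

    def format_url_section(template, **kwargs):
        # Retry formatting, discarding '/'-separated components that still
        # reference a parameter we were not given.
        components = template.split("/")
        while components:
            try:
                return "/".join(components).format(**kwargs)
            except KeyError as key:
                components = [c for c in components if "{{{}}}".format(key.args[0]) not in c]
        return template

    print(format_url_section("{url}/{queueName}/messages",
                             url="https://myaccount.queue.core.windows.net"))
    # https://myaccount.queue.core.windows.net/messages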
- - :ivar service: ServiceOperations operations - :vartype service: azure.storage.queue.aio.operations.ServiceOperations - :ivar queue: QueueOperations operations - :vartype queue: azure.storage.queue.aio.operations.QueueOperations - :ivar messages: MessagesOperations operations - :vartype messages: azure.storage.queue.aio.operations.MessagesOperations - :ivar message_id: MessageIdOperations operations - :vartype message_id: azure.storage.queue.aio.operations.MessageIdOperations - :param url: The URL of the service account, queue or message that is the target of the desired - operation. Required. - :type url: str - :param base_url: Service URL. Required. Default value is "". - :type base_url: str - :keyword version: Specifies the version of the operation to use for this request. Default value - is "2018-03-28". Note that overriding this default value may result in unsupported behavior. - :paramtype version: str - """ - - def __init__( # pylint: disable=missing-client-constructor-parameter-credential - self, url: str, base_url: str = "", **kwargs: Any - ) -> None: - self._config = AzureQueueStorageConfiguration(url=url, **kwargs) - self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) - - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize) - self.queue = QueueOperations(self._client, self._config, self._serialize, self._deserialize) - self.messages = MessagesOperations(self._client, self._config, self._serialize, self._deserialize) - self.message_id = MessageIdOperations(self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.rest.AsyncHttpResponse - """ - - request_copy = deepcopy(request) - request_copy.url = self._client.format_url(request_copy.url) - return self._client.send_request(request_copy, **kwargs) - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> "AzureQueueStorage": - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details) -> None: - await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_configuration.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_configuration.py deleted file mode 100644 index 7c0d3db..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_configuration.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.configuration import Configuration -from azure.core.pipeline import policies - -VERSION = "unknown" - - -class AzureQueueStorageConfiguration(Configuration): # pylint: disable=too-many-instance-attributes - """Configuration for AzureQueueStorage. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param url: The URL of the service account, queue or message that is the target of the desired - operation. Required. - :type url: str - :keyword version: Specifies the version of the operation to use for this request. Default value - is "2018-03-28". Note that overriding this default value may result in unsupported behavior. - :paramtype version: str - """ - - def __init__(self, url: str, **kwargs: Any) -> None: - super(AzureQueueStorageConfiguration, self).__init__(**kwargs) - version = kwargs.pop("version", "2018-03-28") # type: str - - if url is None: - raise ValueError("Parameter 'url' must not be None.") - - self.url = url - self.version = version - kwargs.setdefault("sdk_moniker", "azurequeuestorage/{}".format(VERSION)) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_patch.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_patch.py deleted file mode 100644 index f99e77f..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/_patch.py +++ /dev/null @@ -1,31 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
-# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- - -# This file is used for handwritten extensions to the generated code. Example: -# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md -def patch_sdk(): - pass diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/operations/__init__.py deleted file mode 100644 index d6ff628..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._queue_operations import QueueOperations -from ._messages_operations import MessagesOperations -from ._message_id_operations import MessageIdOperations - -from ._patch import __all__ as _patch_all -from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "ServiceOperations", - "QueueOperations", - "MessagesOperations", - "MessageIdOperations", -] -__all__.extend([p for p in _patch_all if p not in __all__]) -_patch_sdk() diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/operations/_message_id_operations.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/operations/_message_id_operations.py deleted file mode 100644 index 1e4a3d2..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/aio/operations/_message_id_operations.py +++ /dev/null @@ -1,208 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Optional, TypeVar - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import AsyncHttpResponse -from azure.core.rest import HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import models as _models -from ..._vendor import _convert_request -from ...operations._message_id_operations import build_delete_request, build_update_request - -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class MessageIdOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.queue.aio.AzureQueueStorage`'s - :attr:`message_id` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def update( # pylint: disable=inconsistent-return-statements - self, - pop_receipt: str, - visibilitytimeout: int, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - queue_message: Optional[_models.QueueMessage] = None, - **kwargs: Any - ) -> None: - """The Update operation was introduced with version 2011-08-18 of the Queue service API. The - Update Message operation updates the visibility timeout of a message. You can also use this - operation to update the contents of a message. A message must be in a format that can be - included in an XML request with UTF-8 encoding, and the encoded message can be up to 64KB in - size. - - :param pop_receipt: Required. Specifies the valid pop receipt value returned from an earlier - call to the Get Messages or Update Message operation. Required. - :type pop_receipt: str - :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds, - relative to server time. The default value is 30 seconds. A specified value must be larger than - or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol - versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value - later than the expiry time. Required. - :type visibilitytimeout: int - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """The Delete operation deletes the specified message. - - :param pop_receipt: Required. Specifies the valid pop receipt value returned from an earlier - call to the Get Messages or Update Message operation. Required. - :type pop_receipt: str - :param timeout: The The timeout parameter is expressed in seconds. 
For more information, see None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def dequeue( - self, - number_of_messages: Optional[int] = None, - visibilitytimeout: Optional[int] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> List[_models.DequeuedMessageItem]: - """The Dequeue operation retrieves one or more messages from the front of the queue. - - :param number_of_messages: Optional. A nonzero integer value that specifies the number of - messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible - messages are returned. By default, a single message is retrieved from the queue with this - operation. Default value is None. - :type number_of_messages: int - :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds, - relative to server time. The default value is 30 seconds. A specified value must be larger than - or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol - versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value - later than the expiry time. Default value is None. - :type visibilitytimeout: int - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """The Clear operation deletes all messages from the specified queue. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see List[_models.EnqueuedMessage]: - """The Enqueue operation adds a new message to the back of the message queue. A visibility timeout - can also be specified to make the message invisible until the visibility timeout expires. A - message must be in a format that can be included in an XML request with UTF-8 encoding. The - encoded message can be up to 64 KB in size for versions 2011-08-18 and newer, or 8 KB in size - for previous versions. - - :param queue_message: A Message object which can be stored in a Queue. Required. - :type queue_message: ~azure.storage.queue.models.QueueMessage - :param visibilitytimeout: Optional. If specified, the request must be made using an - x-ms-version of 2011-08-18 or later. If not specified, the default value is 0. Specifies the - new visibility timeout value, in seconds, relative to server time. The new value must be larger - than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message - cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value - smaller than the time-to-live value. Default value is None. - :type visibilitytimeout: int - :param message_time_to_live: Optional. Specifies the time-to-live interval for the message, in - seconds. Prior to version 2017-07-29, the maximum time-to-live allowed is 7 days. For version - 2017-07-29 or later, the maximum time-to-live can be any positive number, as well as -1 - indicating that the message does not expire. If this parameter is omitted, the default - time-to-live is 7 days. Default value is None. - :type message_time_to_live: int - :param timeout: The The timeout parameter is expressed in seconds. 
For more information, see List[_models.PeekedMessageItem]: - """The Peek operation retrieves one or more messages from the front of the queue, but does not - alter the visibility of the message. - - :param number_of_messages: Optional. A nonzero integer value that specifies the number of - messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible - messages are returned. By default, a single message is retrieved from the queue with this - operation. Default value is None. - :type number_of_messages: int - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def create( # pylint: disable=inconsistent-return-statements - self, - timeout: Optional[int] = None, - metadata: Optional[Dict[str, str]] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """creates a new queue under the given account. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """operation permanently deletes the specified queue. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """Retrieves user-defined metadata and queue properties on the specified queue. Metadata is - associated with the queue as name-values pairs. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """sets user-defined metadata on the specified queue. Metadata is associated with the queue as - name-value pairs. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see List[_models.SignedIdentifier]: - """returns details about any stored access policies specified on the queue that may be used with - Shared Access Signatures. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """sets stored access policies for the queue that may be used with Shared Access Signatures. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def set_properties( # pylint: disable=inconsistent-return-statements - self, - storage_service_properties: _models.StorageServiceProperties, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """Sets properties for a storage account's Queue service endpoint, including properties for - Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. Required. - :type storage_service_properties: ~azure.storage.queue.models.StorageServiceProperties - :param timeout: The The timeout parameter is expressed in seconds. 
For more information, see _models.StorageServiceProperties: - """gets the properties of a storage account's Queue service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see _models.StorageServiceStats: - """Retrieves statistics related to replication for the Queue service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see _models.ListQueuesSegmentResponse: - """The List Queues Segment operation returns a list of the queues under the specified account. - - :param prefix: Filters the results to return only queues whose name begins with the specified - prefix. Default value is None. - :type prefix: str - :param marker: A string value that identifies the portion of the list of queues to be returned - with the next listing operation. The operation returns the NextMarker value within the response - body if the listing operation did not return all queues remaining to be listed with the current - page. The NextMarker value can be used as the value for the marker parameter in a subsequent - call to request the next page of list items. The marker value is opaque to the client. Default - value is None. - :type marker: str - :param maxresults: Specifies the maximum number of queues to return. If the request does not - specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 - items. Note that if the listing operation crosses a partition boundary, then the service will - return a continuation token for retrieving the remainder of the results. For this reason, it is - possible that the service will return fewer results than specified by maxresults, or than the - default of 5000. Default value is None. - :type maxresults: int - :param include: Include this parameter to specify that the queues' metadata be returned as part - of the response body. Default value is None. - :type include: list[str] - :param timeout: The The timeout parameter is expressed in seconds. For more information, see `. - :vartype metadata: dict[str, str] - """ - - _validation = { - "name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "Name", "type": "str"}, - "metadata": {"key": "Metadata", "type": "{str}"}, - } - _xml_map = {"name": "Queue"} - - def __init__(self, *, name: str, metadata: Optional[Dict[str, str]] = None, **kwargs): - """ - :keyword name: The name of the Queue. Required. - :paramtype name: str - :keyword metadata: Dictionary of :code:``. - :paramtype metadata: dict[str, str] - """ - super().__init__(**kwargs) - self.name = name - self.metadata = metadata - - -class QueueMessage(_serialization.Model): - """A Message object which can be stored in a Queue. - - All required parameters must be populated in order to send to Azure. - - :ivar message_text: The content of the message. Required. - :vartype message_text: str - """ - - _validation = { - "message_text": {"required": True}, - } - - _attribute_map = { - "message_text": {"key": "MessageText", "type": "str"}, - } - - def __init__(self, *, message_text: str, **kwargs): - """ - :keyword message_text: The content of the message. Required. 
- :paramtype message_text: str - """ - super().__init__(**kwargs) - self.message_text = message_text - - -class RetentionPolicy(_serialization.Model): - """the retention policy. - - All required parameters must be populated in order to send to Azure. - - :ivar enabled: Indicates whether a retention policy is enabled for the storage service. - Required. - :vartype enabled: bool - :ivar days: Indicates the number of days that metrics or logging or soft-deleted data should be - retained. All data older than this value will be deleted. - :vartype days: int - """ - - _validation = { - "enabled": {"required": True}, - "days": {"minimum": 1}, - } - - _attribute_map = { - "enabled": {"key": "Enabled", "type": "bool"}, - "days": {"key": "Days", "type": "int"}, - } - - def __init__(self, *, enabled: bool, days: Optional[int] = None, **kwargs): - """ - :keyword enabled: Indicates whether a retention policy is enabled for the storage service. - Required. - :paramtype enabled: bool - :keyword days: Indicates the number of days that metrics or logging or soft-deleted data should - be retained. All data older than this value will be deleted. - :paramtype days: int - """ - super().__init__(**kwargs) - self.enabled = enabled - self.days = days - - -class SignedIdentifier(_serialization.Model): - """signed identifier. - - All required parameters must be populated in order to send to Azure. - - :ivar id: a unique id. Required. - :vartype id: str - :ivar access_policy: The access policy. - :vartype access_policy: ~azure.storage.queue.models.AccessPolicy - """ - - _validation = { - "id": {"required": True}, - } - - _attribute_map = { - "id": {"key": "Id", "type": "str"}, - "access_policy": {"key": "AccessPolicy", "type": "AccessPolicy"}, - } - - def __init__( - self, - *, - id: str, # pylint: disable=redefined-builtin - access_policy: Optional["_models.AccessPolicy"] = None, - **kwargs - ): - """ - :keyword id: a unique id. Required. - :paramtype id: str - :keyword access_policy: The access policy. - :paramtype access_policy: ~azure.storage.queue.models.AccessPolicy - """ - super().__init__(**kwargs) - self.id = id - self.access_policy = access_policy - - -class StorageError(_serialization.Model): - """StorageError. - - :ivar message: - :vartype message: str - """ - - _attribute_map = { - "message": {"key": "Message", "type": "str"}, - } - - def __init__(self, *, message: Optional[str] = None, **kwargs): - """ - :keyword message: - :paramtype message: str - """ - super().__init__(**kwargs) - self.message = message - - -class StorageServiceProperties(_serialization.Model): - """Storage Service Properties. - - :ivar logging: Azure Analytics Logging settings. - :vartype logging: ~azure.storage.queue.models.Logging - :ivar hour_metrics: A summary of request statistics grouped by API in hourly aggregates for - queues. - :vartype hour_metrics: ~azure.storage.queue.models.Metrics - :ivar minute_metrics: a summary of request statistics grouped by API in minute aggregates for - queues. - :vartype minute_metrics: ~azure.storage.queue.models.Metrics - :ivar cors: The set of CORS rules. 
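# --- Illustrative sketch (not part of the deleted sources) ---------------------
# Quick demonstration of the serialization models defined in this file. The import
# path, and the assumption that the models package re-exports these classes, come
# from the file layout shown in this diff.
from azure.multiapi.storagev2.queue.v2021_02_12._generated.models import (
    QueueMessage,
    RetentionPolicy,
    SignedIdentifier,
)

# Retention must be at least one day when enabled (the `days` validator enforces a
# minimum of 1).
retention = RetentionPolicy(enabled=True, days=7)

# A stored access policy entry; the optional AccessPolicy payload is omitted here.
identifier = SignedIdentifier(id="read-only-policy")

# The message body that the Enqueue and Update operations serialize as <QueueMessage>.
message = QueueMessage(message_text="hello from the sketch")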
- :vartype cors: list[~azure.storage.queue.models.CorsRule] - """ - - _attribute_map = { - "logging": {"key": "Logging", "type": "Logging"}, - "hour_metrics": {"key": "HourMetrics", "type": "Metrics"}, - "minute_metrics": {"key": "MinuteMetrics", "type": "Metrics"}, - "cors": {"key": "Cors", "type": "[CorsRule]", "xml": {"wrapped": True}}, - } - - def __init__( - self, - *, - logging: Optional["_models.Logging"] = None, - hour_metrics: Optional["_models.Metrics"] = None, - minute_metrics: Optional["_models.Metrics"] = None, - cors: Optional[List["_models.CorsRule"]] = None, - **kwargs - ): - """ - :keyword logging: Azure Analytics Logging settings. - :paramtype logging: ~azure.storage.queue.models.Logging - :keyword hour_metrics: A summary of request statistics grouped by API in hourly aggregates for - queues. - :paramtype hour_metrics: ~azure.storage.queue.models.Metrics - :keyword minute_metrics: a summary of request statistics grouped by API in minute aggregates - for queues. - :paramtype minute_metrics: ~azure.storage.queue.models.Metrics - :keyword cors: The set of CORS rules. - :paramtype cors: list[~azure.storage.queue.models.CorsRule] - """ - super().__init__(**kwargs) - self.logging = logging - self.hour_metrics = hour_metrics - self.minute_metrics = minute_metrics - self.cors = cors - - -class StorageServiceStats(_serialization.Model): - """Stats for the storage service. - - :ivar geo_replication: Geo-Replication information for the Secondary Storage Service. - :vartype geo_replication: ~azure.storage.queue.models.GeoReplication - """ - - _attribute_map = { - "geo_replication": {"key": "GeoReplication", "type": "GeoReplication"}, - } - - def __init__(self, *, geo_replication: Optional["_models.GeoReplication"] = None, **kwargs): - """ - :keyword geo_replication: Geo-Replication information for the Secondary Storage Service. - :paramtype geo_replication: ~azure.storage.queue.models.GeoReplication - """ - super().__init__(**kwargs) - self.geo_replication = geo_replication diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/models/_patch.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/models/_patch.py deleted file mode 100644 index f7dd325..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/models/_patch.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from typing import List - -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/operations/__init__.py deleted file mode 100644 index d6ff628..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/operations/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from ._service_operations import ServiceOperations -from ._queue_operations import QueueOperations -from ._messages_operations import MessagesOperations -from ._message_id_operations import MessageIdOperations - -from ._patch import __all__ as _patch_all -from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "ServiceOperations", - "QueueOperations", - "MessagesOperations", - "MessageIdOperations", -] -__all__.extend([p for p in _patch_all if p not in __all__]) -_patch_sdk() diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/operations/_message_id_operations.py b/azure/multiapi/storagev2/queue/v2021_02_12/_generated/operations/_message_id_operations.py deleted file mode 100644 index c8dcbaf..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_generated/operations/_message_id_operations.py +++ /dev/null @@ -1,291 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Optional, TypeVar - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.pipeline.transport import HttpResponse -from azure.core.rest import HttpRequest -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. 
import models as _models -from .._serialization import Serializer -from .._vendor import _convert_request, _format_url_section - -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_update_request( - url: str, - *, - pop_receipt: str, - visibilitytimeout: int, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - content: Any = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}/messages/{messageid}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["popreceipt"] = _SERIALIZER.query("pop_receipt", pop_receipt, "str") - _params["visibilitytimeout"] = _SERIALIZER.query( - "visibilitytimeout", visibilitytimeout, "int", maximum=604800, minimum=0 - ) - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) - - -def build_delete_request( - url: str, - *, - pop_receipt: str, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}/messages/{messageid}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["popreceipt"] = _SERIALIZER.query("pop_receipt", pop_receipt, "str") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -class MessageIdOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. 
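# --- Illustrative sketch (not part of the deleted sources) ---------------------
# The build_*_request helpers above all follow the same pattern: validate and
# serialize query parameters, stamp the x-ms-version header, and return an
# azure.core.rest.HttpRequest. A stripped-down standalone version of that pattern,
# using only azure-core, looks roughly like this (names are illustrative):
from typing import Optional

from azure.core.rest import HttpRequest


def build_sketch_delete_message_request(
    url: str, queue_name: str, message_id: str, *, pop_receipt: str, timeout: Optional[int] = None
) -> HttpRequest:
    params = {"popreceipt": pop_receipt}
    if timeout is not None:
        if timeout < 0:
            raise ValueError("timeout must be >= 0")  # mirrors the minimum=0 constraint above
        params["timeout"] = str(timeout)
    headers = {"x-ms-version": "2018-03-28", "Accept": "application/xml"}
    return HttpRequest(
        method="DELETE",
        url=f"{url}/{queue_name}/messages/{message_id}",
        params=params,
        headers=headers,
    )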
- - Instead, you should access the following operations through - :class:`~azure.storage.queue.AzureQueueStorage`'s - :attr:`message_id` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def update( # pylint: disable=inconsistent-return-statements - self, - pop_receipt: str, - visibilitytimeout: int, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - queue_message: Optional[_models.QueueMessage] = None, - **kwargs: Any - ) -> None: - """The Update operation was introduced with version 2011-08-18 of the Queue service API. The - Update Message operation updates the visibility timeout of a message. You can also use this - operation to update the contents of a message. A message must be in a format that can be - included in an XML request with UTF-8 encoding, and the encoded message can be up to 64KB in - size. - - :param pop_receipt: Required. Specifies the valid pop receipt value returned from an earlier - call to the Get Messages or Update Message operation. Required. - :type pop_receipt: str - :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds, - relative to server time. The default value is 30 seconds. A specified value must be larger than - or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol - versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value - later than the expiry time. Required. - :type visibilitytimeout: int - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """The Delete operation deletes the specified message. - - :param pop_receipt: Required. Specifies the valid pop receipt value returned from an earlier - call to the Get Messages or Update Message operation. Required. - :type pop_receipt: str - :param timeout: The The timeout parameter is expressed in seconds. 
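# --- Illustrative sketch (not part of the deleted sources) ---------------------
# Typical pop-receipt flow against the generated MessageIdOperations shown above.
# `message_id_ops` is assumed to be an already-wired MessageIdOperations instance,
# and `receipt` the pop receipt returned by an earlier Get Messages call.
def extend_visibility(message_id_ops, receipt: str) -> None:
    # Push the message's visibility timeout out by five minutes without changing
    # its content (queue_message stays None). Note that the service hands back a
    # fresh pop receipt on every successful update, which later calls must use.
    message_id_ops.update(pop_receipt=receipt, visibilitytimeout=300)


def delete_message(message_id_ops, receipt: str) -> None:
    # Permanently removes the message identified by the (still valid) pop receipt.
    message_id_ops.delete(pop_receipt=receipt)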
For more information, see HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}/messages") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - if number_of_messages is not None: - _params["numofmessages"] = _SERIALIZER.query("number_of_messages", number_of_messages, "int", minimum=1) - if visibilitytimeout is not None: - _params["visibilitytimeout"] = _SERIALIZER.query( - "visibilitytimeout", visibilitytimeout, "int", maximum=604800, minimum=0 - ) - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_clear_request( - url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}/messages") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_enqueue_request( - url: str, - *, - content: Any, - visibilitytimeout: Optional[int] = None, - message_time_to_live: Optional[int] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}/messages") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - if 
visibilitytimeout is not None: - _params["visibilitytimeout"] = _SERIALIZER.query( - "visibilitytimeout", visibilitytimeout, "int", maximum=604800, minimum=0 - ) - if message_time_to_live is not None: - _params["messagettl"] = _SERIALIZER.query("message_time_to_live", message_time_to_live, "int", minimum=-1) - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs) - - -def build_peek_request( - url: str, - *, - number_of_messages: Optional[int] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - peekonly = kwargs.pop("peekonly", _params.pop("peekonly", "true")) # type: str - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}/messages") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["peekonly"] = _SERIALIZER.query("peekonly", peekonly, "str") - if number_of_messages is not None: - _params["numofmessages"] = _SERIALIZER.query("number_of_messages", number_of_messages, "int", minimum=1) - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -class MessagesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.queue.AzureQueueStorage`'s - :attr:`messages` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def dequeue( - self, - number_of_messages: Optional[int] = None, - visibilitytimeout: Optional[int] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> List[_models.DequeuedMessageItem]: - """The Dequeue operation retrieves one or more messages from the front of the queue. 
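# --- Illustrative sketch (not part of the deleted sources) ---------------------
# Draining a batch of messages with the generated MessagesOperations.dequeue shown
# above. `messages_ops` is an already-wired MessagesOperations instance (an
# assumption); DequeuedMessageItem fields are not inspected here because that model
# is defined elsewhere in the package.
def receive_batch(messages_ops, batch_size: int = 32):
    # Up to 32 messages per call; each becomes invisible to other consumers for the
    # requested visibility timeout (two minutes here).
    return messages_ops.dequeue(number_of_messages=batch_size, visibilitytimeout=120)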
- - :param number_of_messages: Optional. A nonzero integer value that specifies the number of - messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible - messages are returned. By default, a single message is retrieved from the queue with this - operation. Default value is None. - :type number_of_messages: int - :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds, - relative to server time. The default value is 30 seconds. A specified value must be larger than - or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol - versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value - later than the expiry time. Default value is None. - :type visibilitytimeout: int - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """The Clear operation deletes all messages from the specified queue. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see List[_models.EnqueuedMessage]: - """The Enqueue operation adds a new message to the back of the message queue. A visibility timeout - can also be specified to make the message invisible until the visibility timeout expires. A - message must be in a format that can be included in an XML request with UTF-8 encoding. The - encoded message can be up to 64 KB in size for versions 2011-08-18 and newer, or 8 KB in size - for previous versions. - - :param queue_message: A Message object which can be stored in a Queue. Required. - :type queue_message: ~azure.storage.queue.models.QueueMessage - :param visibilitytimeout: Optional. If specified, the request must be made using an - x-ms-version of 2011-08-18 or later. If not specified, the default value is 0. Specifies the - new visibility timeout value, in seconds, relative to server time. The new value must be larger - than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message - cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value - smaller than the time-to-live value. Default value is None. - :type visibilitytimeout: int - :param message_time_to_live: Optional. Specifies the time-to-live interval for the message, in - seconds. Prior to version 2017-07-29, the maximum time-to-live allowed is 7 days. For version - 2017-07-29 or later, the maximum time-to-live can be any positive number, as well as -1 - indicating that the message does not expire. If this parameter is omitted, the default - time-to-live is 7 days. Default value is None. - :type message_time_to_live: int - :param timeout: The The timeout parameter is expressed in seconds. For more information, see List[_models.PeekedMessageItem]: - """The Peek operation retrieves one or more messages from the front of the queue, but does not - alter the visibility of the message. - - :param number_of_messages: Optional. A nonzero integer value that specifies the number of - messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible - messages are returned. By default, a single message is retrieved from the queue with this - operation. Default value is None. - :type number_of_messages: int - :param timeout: The The timeout parameter is expressed in seconds. 
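# --- Illustrative sketch (not part of the deleted sources) ---------------------
# Putting a non-expiring message on the queue via the generated
# MessagesOperations.enqueue documented above (Peek, by contrast, reads without
# changing visibility). The models import path is assumed from the file layout in
# this diff; `messages_ops` is an already-wired MessagesOperations instance.
from azure.multiapi.storagev2.queue.v2021_02_12._generated import models as _models


def enqueue_forever(messages_ops, text: str):
    msg = _models.QueueMessage(message_text=text)
    # message_time_to_live=-1 keeps the message until it is explicitly deleted
    # (allowed for service versions 2017-07-29 and later, per the docstring above).
    return messages_ops.enqueue(msg, visibilitytimeout=0, message_time_to_live=-1)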
For more information, see HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - if metadata is not None: - _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_delete_request( - url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_properties_request( - url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - comp = kwargs.pop("comp", _params.pop("comp", "metadata")) # type: str - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["comp"] = _SERIALIZER.query("comp", comp, "str") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = 
_SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_set_metadata_request( - url: str, - *, - timeout: Optional[int] = None, - metadata: Optional[Dict[str, str]] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - comp = kwargs.pop("comp", _params.pop("comp", "metadata")) # type: str - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["comp"] = _SERIALIZER.query("comp", comp, "str") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - if metadata is not None: - _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_access_policy_request( - url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - comp = kwargs.pop("comp", _params.pop("comp", "acl")) # type: str - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["comp"] = _SERIALIZER.query("comp", comp, "str") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_set_access_policy_request( - url: str, - *, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - content: Any = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - comp = kwargs.pop("comp", _params.pop("comp", "acl")) # type: str - content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # 
type: Optional[str] - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}/{queueName}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["comp"] = _SERIALIZER.query("comp", comp, "str") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) - - -class QueueOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.queue.AzureQueueStorage`'s - :attr:`queue` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def create( # pylint: disable=inconsistent-return-statements - self, - timeout: Optional[int] = None, - metadata: Optional[Dict[str, str]] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """creates a new queue under the given account. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """operation permanently deletes the specified queue. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """Retrieves user-defined metadata and queue properties on the specified queue. Metadata is - associated with the queue as name-values pairs. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """sets user-defined metadata on the specified queue. Metadata is associated with the queue as - name-value pairs. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see List[_models.SignedIdentifier]: - """returns details about any stored access policies specified on the queue that may be used with - Shared Access Signatures. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: - """sets stored access policies for the queue that may be used with Shared Access Signatures. - - :param timeout: The The timeout parameter is expressed in seconds. 
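# --- Illustrative sketch (not part of the deleted sources) ---------------------
# Reading back stored access policies with the generated QueueOperations shown
# above. `queue_ops` is an already-wired QueueOperations instance (an assumption).
def list_policy_ids(queue_ops):
    # get_access_policy returns a list of SignedIdentifier models; each carries the
    # policy id plus an optional AccessPolicy payload.
    return [identifier.id for identifier in queue_ops.get_access_policy(timeout=30)]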
For more information, see HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str - comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str - content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["restype"] = _SERIALIZER.query("restype", restype, "str") - _params["comp"] = _SERIALIZER.query("comp", comp, "str") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) - - -def build_get_properties_request( - url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str - comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["restype"] = _SERIALIZER.query("restype", restype, "str") - _params["comp"] = _SERIALIZER.query("comp", comp, "str") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_statistics_request( - url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str - comp = kwargs.pop("comp", _params.pop("comp", "stats")) # type: str - version = 
kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["restype"] = _SERIALIZER.query("restype", restype, "str") - _params["comp"] = _SERIALIZER.query("comp", comp, "str") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_list_queues_segment_request( - url: str, - *, - prefix: Optional[str] = None, - marker: Optional[str] = None, - maxresults: Optional[int] = None, - include: Optional[List[str]] = None, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str - version = kwargs.pop("version", _headers.pop("x-ms-version", "2018-03-28")) # type: str - accept = _headers.pop("Accept", "application/xml") - - # Construct URL - _url = kwargs.pop("template_url", "{url}") - path_format_arguments = { - "url": _SERIALIZER.url("url", url, "str", skip_quote=True), - } - - _url = _format_url_section(_url, **path_format_arguments) - - # Construct parameters - _params["comp"] = _SERIALIZER.query("comp", comp, "str") - if prefix is not None: - _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str") - if marker is not None: - _params["marker"] = _SERIALIZER.query("marker", marker, "str") - if maxresults is not None: - _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) - if include is not None: - _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",") - if timeout is not None: - _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) - - # Construct headers - _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") - if request_id_parameter is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -class ServiceOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.storage.queue.AzureQueueStorage`'s - :attr:`service` attribute. 
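# --- Illustrative sketch (not part of the deleted sources) ---------------------
# The statistics request above (restype=service, comp=stats) is only meaningful
# when issued against the account's secondary endpoint with read-access
# geo-redundant replication enabled. A typical call through the generated
# ServiceOperations (an already-wired instance is assumed here) looks like this:
def last_sync_status(service_ops):
    stats = service_ops.get_statistics(timeout=30)
    # StorageServiceStats.geo_replication carries the replication status and last
    # sync time for the secondary location.
    return stats.geo_replication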
- """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def set_properties( # pylint: disable=inconsistent-return-statements - self, - storage_service_properties: _models.StorageServiceProperties, - timeout: Optional[int] = None, - request_id_parameter: Optional[str] = None, - **kwargs: Any - ) -> None: - """Sets properties for a storage account's Queue service endpoint, including properties for - Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param storage_service_properties: The StorageService properties. Required. - :type storage_service_properties: ~azure.storage.queue.models.StorageServiceProperties - :param timeout: The The timeout parameter is expressed in seconds. For more information, see _models.StorageServiceProperties: - """gets the properties of a storage account's Queue service, including properties for Storage - Analytics and CORS (Cross-Origin Resource Sharing) rules. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see _models.StorageServiceStats: - """Retrieves statistics related to replication for the Queue service. It is only available on the - secondary location endpoint when read-access geo-redundant replication is enabled for the - storage account. - - :param timeout: The The timeout parameter is expressed in seconds. For more information, see _models.ListQueuesSegmentResponse: - """The List Queues Segment operation returns a list of the queues under the specified account. - - :param prefix: Filters the results to return only queues whose name begins with the specified - prefix. Default value is None. - :type prefix: str - :param marker: A string value that identifies the portion of the list of queues to be returned - with the next listing operation. The operation returns the NextMarker value within the response - body if the listing operation did not return all queues remaining to be listed with the current - page. The NextMarker value can be used as the value for the marker parameter in a subsequent - call to request the next page of list items. The marker value is opaque to the client. Default - value is None. - :type marker: str - :param maxresults: Specifies the maximum number of queues to return. If the request does not - specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 - items. Note that if the listing operation crosses a partition boundary, then the service will - return a continuation token for retrieving the remainder of the results. For this reason, it is - possible that the service will return fewer results than specified by maxresults, or than the - default of 5000. Default value is None. - :type maxresults: int - :param include: Include this parameter to specify that the queues' metadata be returned as part - of the response body. Default value is None. - :type include: list[str] - :param timeout: The The timeout parameter is expressed in seconds. 
For more information, see str - api_version = kwargs.get('api_version', None) - if api_version and api_version not in _SUPPORTED_API_VERSIONS: - versions = '\n'.join(_SUPPORTED_API_VERSIONS) - raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) - return api_version or _SUPPORTED_API_VERSIONS[-1] diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/__init__.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/__init__.py deleted file mode 100644 index 160f882..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import hmac - -try: - from urllib.parse import quote, unquote -except ImportError: - from urllib2 import quote, unquote # type: ignore - -import six - - -def url_quote(url): - return quote(url) - - -def url_unquote(url): - return unquote(url) - - -def encode_base64(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def decode_base64_to_bytes(data): - if isinstance(data, six.text_type): - data = data.encode('utf-8') - return base64.b64decode(data) - - -def decode_base64_to_text(data): - decoded_bytes = decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') - - -def sign_string(key, string_to_sign, key_is_base64=True): - if key_is_base64: - key = decode_base64_to_bytes(key) - else: - if isinstance(key, six.text_type): - key = key.encode('utf-8') - if isinstance(string_to_sign, six.text_type): - string_to_sign = string_to_sign.encode('utf-8') - signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) - digest = signed_hmac_sha256.digest() - encoded_digest = encode_base64(digest) - return encoded_digest diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/authentication.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/authentication.py deleted file mode 100644 index d04c1e4..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/authentication.py +++ /dev/null @@ -1,142 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import logging -import sys - -try: - from urllib.parse import urlparse, unquote -except ImportError: - from urlparse import urlparse # type: ignore - from urllib2 import unquote # type: ignore - -try: - from yarl import URL -except ImportError: - pass - -try: - from azure.core.pipeline.transport import AioHttpTransport -except ImportError: - AioHttpTransport = None - -from azure.core.exceptions import ClientAuthenticationError -from azure.core.pipeline.policies import SansIOHTTPPolicy - -from . 
import sign_string - - -logger = logging.getLogger(__name__) - - - -# wraps a given exception with the desired exception type -def _wrap_exception(ex, desired_type): - msg = "" - if ex.args: - msg = ex.args[0] - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - return desired_type(msg) - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) - - -class AzureSigningError(ClientAuthenticationError): - """ - Represents a fatal error when attempting to sign a request. - In general, the cause of this exception is user error. For example, the given account key is not valid. - Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. - """ - - -# pylint: disable=no-self-use -class SharedKeyCredentialPolicy(SansIOHTTPPolicy): - - def __init__(self, account_name, account_key): - self.account_name = account_name - self.account_key = account_key - super(SharedKeyCredentialPolicy, self).__init__() - - @staticmethod - def _get_headers(request, headers_to_sign): - headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) - if 'content-length' in headers and headers['content-length'] == '0': - del headers['content-length'] - return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' - - @staticmethod - def _get_verb(request): - return request.http_request.method + '\n' - - def _get_canonicalized_resource(self, request): - uri_path = urlparse(request.http_request.url).path - try: - if isinstance(request.context.transport, AioHttpTransport) or \ - isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ - isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), - AioHttpTransport): - uri_path = URL(uri_path) - return '/' + self.account_name + str(uri_path) - except TypeError: - pass - return '/' + self.account_name + uri_path - - @staticmethod - def _get_canonicalized_headers(request): - string_to_sign = '' - x_ms_headers = [] - for name, value in request.http_request.headers.items(): - if name.startswith('x-ms-'): - x_ms_headers.append((name.lower(), value)) - x_ms_headers.sort() - for name, value in x_ms_headers: - if value is not None: - string_to_sign += ''.join([name, ':', value, '\n']) - return string_to_sign - - @staticmethod - def _get_canonicalized_resource_query(request): - sorted_queries = list(request.http_request.query.items()) - sorted_queries.sort() - - string_to_sign = '' - for name, value in sorted_queries: - if value is not None: - string_to_sign += '\n' + name.lower() + ':' + unquote(value) - - return string_to_sign - - def _add_authorization_header(self, request, string_to_sign): - try: - signature = sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.http_request.headers['Authorization'] = auth_string - except Exception as ex: - # Wrap any error that occurred as signing error - # Doing so will clarify/locate the source of problem - raise _wrap_exception(ex, AzureSigningError) - - def on_request(self, request): - string_to_sign = \ - self._get_verb(request) + \ - self._get_headers( - request, - [ - 'content-encoding', 'content-language', 'content-length', - 
'content-md5', 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' - ] - ) + \ - self._get_canonicalized_headers(request) + \ - self._get_canonicalized_resource(request) + \ - self._get_canonicalized_resource_query(request) - - self._add_authorization_header(request, string_to_sign) - #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/base_client.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/base_client.py deleted file mode 100644 index 5ddaf3e..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/base_client.py +++ /dev/null @@ -1,466 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -import logging -import uuid -from typing import ( # pylint: disable=unused-import - Optional, - Any, - Tuple, - TYPE_CHECKING -) - -try: - from urllib.parse import parse_qs, quote -except ImportError: - from urlparse import parse_qs # type: ignore - from urllib2 import quote # type: ignore - -import six - -from azure.core.configuration import Configuration -from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline import Pipeline -from azure.core.pipeline.transport import RequestsTransport, HttpTransport -from azure.core.pipeline.policies import ( - RedirectPolicy, - ContentDecodePolicy, - BearerTokenCredentialPolicy, - ProxyPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, - UserAgentPolicy, - AzureSasCredentialPolicy -) - -from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .models import LocationMode -from .authentication import SharedKeyCredentialPolicy -from .shared_access_signature import QueryStringConstants -from .request_handlers import serialize_batch_body, _get_batch_request_delimiter -from .policies import ( - StorageHeadersPolicy, - StorageContentValidation, - StorageRequestHook, - StorageResponseHook, - StorageLoggingPolicy, - StorageHosts, - QueueMessagePolicy, - ExponentialRetry, -) -from .._version import VERSION -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.credentials import TokenCredential - - -_LOGGER = logging.getLogger(__name__) -_SERVICE_PARAMS = { - "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, - "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, - "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, - "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, -} - -class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes - def __init__( - self, - parsed_url, # type: Any - service, # type: str - credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long - **kwargs # type: Any - ): - # type: (...) 
-> None - self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) - self._hosts = kwargs.get("_hosts") - self.scheme = parsed_url.scheme - - if service not in ["blob", "queue", "file-share", "dfs"]: - raise ValueError("Invalid service: {}".format(service)) - service_name = service.split('-')[0] - account = parsed_url.netloc.split(".{}.core.".format(service_name)) - - self.account_name = account[0] if len(account) > 1 else None - if not self.account_name and parsed_url.netloc.startswith("localhost") \ - or parsed_url.netloc.startswith("127.0.0.1"): - self.account_name = parsed_url.path.strip("/") - - self.credential = _format_shared_key_credential(self.account_name, credential) - if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): - raise ValueError("Token credential is only supported with HTTPS.") - - secondary_hostname = None - if hasattr(self.credential, "account_name"): - self.account_name = self.credential.account_name - secondary_hostname = "{}-secondary.{}.{}".format( - self.credential.account_name, service_name, SERVICE_HOST_BASE) - - if not self._hosts: - if len(account) > 1: - secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") - if kwargs.get("secondary_hostname"): - secondary_hostname = kwargs["secondary_hostname"] - primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') - self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} - - self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) - - def __enter__(self): - self._client.__enter__() - return self - - def __exit__(self, *args): - self._client.__exit__(*args) - - def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - self._client.close() - - @property - def url(self): - """The full endpoint URL to this entity, including SAS token if used. - - This could be either the primary endpoint, - or the secondary endpoint depending on the current :func:`location_mode`. - """ - return self._format_url(self._hosts[self._location_mode]) - - @property - def primary_endpoint(self): - """The full primary endpoint URL. - - :type: str - """ - return self._format_url(self._hosts[LocationMode.PRIMARY]) - - @property - def primary_hostname(self): - """The hostname of the primary endpoint. - - :type: str - """ - return self._hosts[LocationMode.PRIMARY] - - @property - def secondary_endpoint(self): - """The full secondary endpoint URL if configured. - - If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str - :raise ValueError: - """ - if not self._hosts[LocationMode.SECONDARY]: - raise ValueError("No secondary host configured.") - return self._format_url(self._hosts[LocationMode.SECONDARY]) - - @property - def secondary_hostname(self): - """The hostname of the secondary endpoint. - - If not available this will be None. To explicitly specify a secondary hostname, use the optional - `secondary_hostname` keyword argument on instantiation. - - :type: str or None - """ - return self._hosts[LocationMode.SECONDARY] - - @property - def location_mode(self): - """The location mode that the client is currently using. - - By default this will be "primary". Options include "primary" and "secondary". 
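# --- Illustrative sketch (not part of the deleted sources) ---------------------
# Pointing a client at the secondary endpoint for reads, using the location_mode
# property defined on StorageAccountHostsMixin above. Any client built on that
# mixin (assumed here as `client`) accepts the documented string values "primary"
# and "secondary"; setting a value with no configured host raises ValueError.
def read_from_secondary(client):
    if client.secondary_hostname is None:
        raise ValueError("No secondary endpoint is configured for this account.")
    client.location_mode = "secondary"
    return client.url  # now resolves against the secondary host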
- - :type: str - """ - - return self._location_mode - - @location_mode.setter - def location_mode(self, value): - if self._hosts.get(value): - self._location_mode = value - self._client._config.url = self.url # pylint: disable=protected-access - else: - raise ValueError("No host URL for location mode: {}".format(value)) - - @property - def api_version(self): - """The version of the Storage API used for requests. - - :type: str - """ - return self._client._config.version # pylint: disable=protected-access - - def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): - query_str = "?" - if snapshot: - query_str += "snapshot={}&".format(self.snapshot) - if share_snapshot: - query_str += "sharesnapshot={}&".format(self.snapshot) - if sas_token and isinstance(credential, AzureSasCredential): - raise ValueError( - "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") - if sas_token and not credential: - query_str += sas_token - elif is_credential_sastoken(credential): - query_str += credential.lstrip("?") - credential = None - return query_str.rstrip("?&"), credential - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, "get_token"): - self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - - config = kwargs.get("_configuration") or create_configuration(**kwargs) - if kwargs.get("_pipeline"): - return config, kwargs["_pipeline"] - config.transport = kwargs.get("transport") # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - config.transport = RequestsTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - ContentDecodePolicy(response_encoding="utf-8"), - RedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), - config.retry_policy, - config.headers_policy, - StorageRequestHook(**kwargs), - self._credential_policy, - config.logging_policy, - StorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs) - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, Pipeline(config.transport, policies=policies) - - def _batch_send( - self, - *reqs, # type: HttpRequest - **kwargs - ): - """Given a series of request, do a Storage batch call. 
- """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - batch_id = str(uuid.uuid1()) - - request = self._client._client.post( # pylint: disable=protected-access - url='{}://{}/{}?{}comp=batch{}{}'.format( - self.scheme, - self.primary_hostname, - kwargs.pop('path', ""), - kwargs.pop('restype', ""), - kwargs.pop('sas', ""), - kwargs.pop('timeout', "") - ), - headers={ - 'x-ms-version': self.api_version, - "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) - } - ) - - policies = [StorageHeadersPolicy()] - if self._credential_policy: - policies.append(self._credential_policy) - - request.set_multipart_mixed( - *reqs, - policies=policies, - enforce_https=False - ) - - Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access - body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) - request.set_bytes_body(body) - - temp = request.multipart_mixed_info - request.multipart_mixed_info = None - pipeline_response = self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - request.multipart_mixed_info = temp - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() - if raise_on_any_failure: - parts = list(response.parts()) - if any(p for p in parts if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts - ) - raise error - return iter(parts) - return parts - except HttpResponseError as error: - process_storage_error(error) - -class TransportWrapper(HttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. 
- """ - def __init__(self, transport): - self._transport = transport - - def send(self, request, **kwargs): - return self._transport.send(request, **kwargs) - - def open(self): - pass - - def close(self): - pass - - def __enter__(self): - pass - - def __exit__(self, *args): # pylint: disable=arguments-differ - pass - - -def _format_shared_key_credential(account_name, credential): - if isinstance(credential, six.string_types): - if not account_name: - raise ValueError("Unable to determine account name for shared key credential.") - credential = {"account_name": account_name, "account_key": credential} - if isinstance(credential, dict): - if "account_name" not in credential: - raise ValueError("Shared key credential missing 'account_name") - if "account_key" not in credential: - raise ValueError("Shared key credential missing 'account_key") - return SharedKeyCredentialPolicy(**credential) - if isinstance(credential, AzureNamedKeyCredential): - return SharedKeyCredentialPolicy(credential.named_key.name, credential.named_key.key) - return credential - - -def parse_connection_str(conn_str, credential, service): - conn_str = conn_str.rstrip(";") - conn_settings = [s.split("=", 1) for s in conn_str.split(";")] - if any(len(tup) != 2 for tup in conn_settings): - raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict((key.upper(), val) for key, val in conn_settings) - endpoints = _SERVICE_PARAMS[service] - primary = None - secondary = None - if not credential: - try: - credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} - except KeyError: - credential = conn_settings.get("SHAREDACCESSSIGNATURE") - if endpoints["primary"] in conn_settings: - primary = conn_settings[endpoints["primary"]] - if endpoints["secondary"] in conn_settings: - secondary = conn_settings[endpoints["secondary"]] - else: - if endpoints["secondary"] in conn_settings: - raise ValueError("Connection string specifies only secondary endpoint.") - try: - primary = "{}://{}.{}.{}".format( - conn_settings["DEFAULTENDPOINTSPROTOCOL"], - conn_settings["ACCOUNTNAME"], - service, - conn_settings["ENDPOINTSUFFIX"], - ) - secondary = "{}-secondary.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] - ) - except KeyError: - pass - - if not primary: - try: - primary = "https://{}.{}.{}".format( - conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) - ) - except KeyError: - raise ValueError("Connection string missing required connection details.") - if service == "dfs": - primary = primary.replace(".blob.", ".dfs.") - if secondary: - secondary = secondary.replace(".blob.", ".dfs.") - return primary, secondary, credential - - -def create_configuration(**kwargs): - # type: (**Any) -> Configuration - config = Configuration(**kwargs) - config.headers_policy = StorageHeadersPolicy(**kwargs) - config.user_agent_policy = UserAgentPolicy( - sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) - config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) - config.logging_policy = StorageLoggingPolicy(**kwargs) - config.proxy_policy = ProxyPolicy(**kwargs) - - # Storage settings - config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) - config.copy_polling_interval = 15 - - # Block blob uploads - config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) - config.min_large_block_upload_threshold = 
kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) - config.use_byte_buffer = kwargs.get("use_byte_buffer", False) - - # Page blob uploads - config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) - - # Datalake file uploads - config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) - - # Blob downloads - config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) - config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) - - # File uploads - config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) - return config - - -def parse_query(query_str): - sas_values = QueryStringConstants.to_list() - parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} - sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] - sas_token = None - if sas_params: - sas_token = "&".join(sas_params) - - snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") - return snapshot, sas_token - - -def is_credential_sastoken(credential): - if not credential or not isinstance(credential, six.string_types): - return False - - sas_values = QueryStringConstants.to_list() - parsed_query = parse_qs(credential.lstrip("?")) - if parsed_query and all([k in sas_values for k in parsed_query.keys()]): - return True - return False diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/base_client_async.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/base_client_async.py deleted file mode 100644 index 16eb6de..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/base_client_async.py +++ /dev/null @@ -1,182 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging - -from azure.core.credentials import AzureSasCredential -from azure.core.pipeline import AsyncPipeline -from azure.core.async_paging import AsyncList -from azure.core.exceptions import HttpResponseError -from azure.core.pipeline.policies import ( - ContentDecodePolicy, - AsyncBearerTokenCredentialPolicy, - AsyncRedirectPolicy, - DistributedTracingPolicy, - HttpLoggingPolicy, AzureSasCredentialPolicy, -) -from azure.core.pipeline.transport import AsyncHttpTransport - -from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT -from .authentication import SharedKeyCredentialPolicy -from .base_client import create_configuration -from .policies import ( - StorageContentValidation, - StorageRequestHook, - StorageHosts, - StorageHeadersPolicy, - QueueMessagePolicy -) -from .policies_async import AsyncStorageResponseHook - -from .response_handlers import process_storage_error, PartialBatchErrorException - -if TYPE_CHECKING: - from azure.core.pipeline import Pipeline - from azure.core.pipeline.transport import HttpRequest - from azure.core.configuration import Configuration -_LOGGER = logging.getLogger(__name__) - - -class AsyncStorageAccountHostsMixin(object): - - def __enter__(self): - raise TypeError("Async client only supports 'async with'.") - - def __exit__(self, *args): - pass - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def __aexit__(self, *args): - await self._client.__aexit__(*args) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() - - def _create_pipeline(self, credential, **kwargs): - # type: (Any, **Any) -> Tuple[Configuration, Pipeline] - self._credential_policy = None - if hasattr(credential, 'get_token'): - self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) - elif isinstance(credential, SharedKeyCredentialPolicy): - self._credential_policy = credential - elif isinstance(credential, AzureSasCredential): - self._credential_policy = AzureSasCredentialPolicy(credential) - elif credential is not None: - raise TypeError("Unsupported credential: {}".format(credential)) - config = kwargs.get('_configuration') or create_configuration(**kwargs) - if kwargs.get('_pipeline'): - return config, kwargs['_pipeline'] - config.transport = kwargs.get('transport') # type: ignore - kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) - kwargs.setdefault("read_timeout", READ_TIMEOUT) - if not config.transport: - try: - from azure.core.pipeline.transport import AioHttpTransport - except ImportError: - raise ImportError("Unable to create async transport. 
Please check aiohttp is installed.") - config.transport = AioHttpTransport(**kwargs) - policies = [ - QueueMessagePolicy(), - config.headers_policy, - config.proxy_policy, - config.user_agent_policy, - StorageContentValidation(), - StorageRequestHook(**kwargs), - self._credential_policy, - ContentDecodePolicy(response_encoding="utf-8"), - AsyncRedirectPolicy(**kwargs), - StorageHosts(hosts=self._hosts, **kwargs), # type: ignore - config.retry_policy, - config.logging_policy, - AsyncStorageResponseHook(**kwargs), - DistributedTracingPolicy(**kwargs), - HttpLoggingPolicy(**kwargs), - ] - if kwargs.get("_additional_pipeline_policies"): - policies = policies + kwargs.get("_additional_pipeline_policies") - return config, AsyncPipeline(config.transport, policies=policies) - - async def _batch_send( - self, *reqs: 'HttpRequest', - **kwargs - ): - """Given a series of request, do a Storage batch call. - """ - # Pop it here, so requests doesn't feel bad about additional kwarg - raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) - request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), - headers={ - 'x-ms-version': self.api_version - } - ) - - request.set_multipart_mixed( - *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], - enforce_https=False - ) - - pipeline_response = await self._pipeline.run( - request, **kwargs - ) - response = pipeline_response.http_response - - try: - if response.status_code not in [202]: - raise HttpResponseError(response=response) - parts = response.parts() # Return an AsyncIterator - if raise_on_any_failure: - parts_list = [] - async for part in parts: - parts_list.append(part) - if any(p for p in parts_list if not 200 <= p.status_code < 300): - error = PartialBatchErrorException( - message="There is a partial failure in the batch operation.", - response=response, parts=parts_list - ) - raise error - return AsyncList(parts_list) - return parts - except HttpResponseError as error: - process_storage_error(error) - - -class AsyncTransportWrapper(AsyncHttpTransport): - """Wrapper class that ensures that an inner client created - by a `get_client` method does not close the outer transport for the parent - when used in a context manager. - """ - def __init__(self, async_transport): - self._transport = async_transport - - async def send(self, request, **kwargs): - return await self._transport.send(request, **kwargs) - - async def open(self): - pass - - async def close(self): - pass - - async def __aenter__(self): - pass - - async def __aexit__(self, *args): # pylint: disable=arguments-differ - pass diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/constants.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/constants.py deleted file mode 100644 index ea467c5..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/constants.py +++ /dev/null @@ -1,18 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -from .._serialize import _SUPPORTED_API_VERSIONS - - -X_MS_VERSION = _SUPPORTED_API_VERSIONS[-1] - -# Default socket timeouts, in seconds -CONNECTION_TIMEOUT = 20 -READ_TIMEOUT = 2000 # 100MB (max block size) / 50KB/s (an arbitrarily chosen minimum upload speed) - -STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" - -SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/models.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/models.py deleted file mode 100644 index 199b652..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/models.py +++ /dev/null @@ -1,483 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -# pylint: disable=too-many-instance-attributes - -from enum import Enum - -from azure.core import CaseInsensitiveEnumMeta - - -def get_enum_value(value): - if value is None or value in ["None", ""]: - return None - try: - return value.value - except AttributeError: - return value - - -class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - - # Generic storage values - ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" - ACCOUNT_BEING_CREATED = "AccountBeingCreated" - ACCOUNT_IS_DISABLED = "AccountIsDisabled" - AUTHENTICATION_FAILED = "AuthenticationFailed" - AUTHORIZATION_FAILURE = "AuthorizationFailure" - NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" - CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" - CONDITION_NOT_MET = "ConditionNotMet" - EMPTY_METADATA_KEY = "EmptyMetadataKey" - INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" - INTERNAL_ERROR = "InternalError" - INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" - INVALID_HEADER_VALUE = "InvalidHeaderValue" - INVALID_HTTP_VERB = "InvalidHttpVerb" - INVALID_INPUT = "InvalidInput" - INVALID_MD5 = "InvalidMd5" - INVALID_METADATA = "InvalidMetadata" - INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" - INVALID_RANGE = "InvalidRange" - INVALID_RESOURCE_NAME = "InvalidResourceName" - INVALID_URI = "InvalidUri" - INVALID_XML_DOCUMENT = "InvalidXmlDocument" - INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" - MD5_MISMATCH = "Md5Mismatch" - METADATA_TOO_LARGE = "MetadataTooLarge" - MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" - MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" - MISSING_REQUIRED_HEADER = "MissingRequiredHeader" - MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" - MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" - OPERATION_TIMED_OUT = "OperationTimedOut" - OUT_OF_RANGE_INPUT = "OutOfRangeInput" - OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" - REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" - RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" - REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" - RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" - RESOURCE_NOT_FOUND = "ResourceNotFound" - SERVER_BUSY = "ServerBusy" - UNSUPPORTED_HEADER = "UnsupportedHeader" - UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" - UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" - UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" - - # Blob values - 
APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" - BLOB_ALREADY_EXISTS = "BlobAlreadyExists" - BLOB_NOT_FOUND = "BlobNotFound" - BLOB_OVERWRITTEN = "BlobOverwritten" - BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" - BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" - BLOCK_LIST_TOO_LONG = "BlockListTooLong" - CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" - CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" - CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" - CONTAINER_BEING_DELETED = "ContainerBeingDeleted" - CONTAINER_DISABLED = "ContainerDisabled" - CONTAINER_NOT_FOUND = "ContainerNotFound" - CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" - COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" - COPY_ID_MISMATCH = "CopyIdMismatch" - FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" - INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" - INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" - INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" - INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" - INVALID_BLOB_TIER = "InvalidBlobTier" - INVALID_BLOB_TYPE = "InvalidBlobType" - INVALID_BLOCK_ID = "InvalidBlockId" - INVALID_BLOCK_LIST = "InvalidBlockList" - INVALID_OPERATION = "InvalidOperation" - INVALID_PAGE_RANGE = "InvalidPageRange" - INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" - INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" - INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" - LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" - LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" - LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" - LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" - LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" - LEASE_ID_MISSING = "LeaseIdMissing" - LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" - LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" - LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" - LEASE_LOST = "LeaseLost" - LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" - LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" - LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" - MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" - NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" - OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" - PENDING_COPY_OPERATION = "PendingCopyOperation" - PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" - PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" - PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" - SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" - SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" - SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" - SNAPHOT_OPERATION_RATE_EXCEEDED = "SnaphotOperationRateExceeded" - SNAPSHOTS_PRESENT = "SnapshotsPresent" - SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" - SYSTEM_IN_USE = "SystemInUse" - TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" - UNAUTHORIZED_BLOB_OVERWRITE = 
"UnauthorizedBlobOverwrite" - BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" - BLOB_ARCHIVED = "BlobArchived" - BLOB_NOT_ARCHIVED = "BlobNotArchived" - - # Queue values - INVALID_MARKER = "InvalidMarker" - MESSAGE_NOT_FOUND = "MessageNotFound" - MESSAGE_TOO_LARGE = "MessageTooLarge" - POP_RECEIPT_MISMATCH = "PopReceiptMismatch" - QUEUE_ALREADY_EXISTS = "QueueAlreadyExists" - QUEUE_BEING_DELETED = "QueueBeingDeleted" - QUEUE_DISABLED = "QueueDisabled" - QUEUE_NOT_EMPTY = "QueueNotEmpty" - QUEUE_NOT_FOUND = "QueueNotFound" - - # File values - CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" - CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" - DELETE_PENDING = "DeletePending" - DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" - FILE_LOCK_CONFLICT = "FileLockConflict" - INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" - PARENT_NOT_FOUND = "ParentNotFound" - READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" - SHARE_ALREADY_EXISTS = "ShareAlreadyExists" - SHARE_BEING_DELETED = "ShareBeingDeleted" - SHARE_DISABLED = "ShareDisabled" - SHARE_NOT_FOUND = "ShareNotFound" - SHARING_VIOLATION = "SharingViolation" - SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" - SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" - SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" - SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" - CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" - - # DataLake values - CONTENT_LENGTH_MUST_BE_ZERO = 'ContentLengthMustBeZero' - PATH_ALREADY_EXISTS = 'PathAlreadyExists' - INVALID_FLUSH_POSITION = 'InvalidFlushPosition' - INVALID_PROPERTY_NAME = 'InvalidPropertyName' - INVALID_SOURCE_URI = 'InvalidSourceUri' - UNSUPPORTED_REST_VERSION = 'UnsupportedRestVersion' - FILE_SYSTEM_NOT_FOUND = 'FilesystemNotFound' - PATH_NOT_FOUND = 'PathNotFound' - RENAME_DESTINATION_PARENT_PATH_NOT_FOUND = 'RenameDestinationParentPathNotFound' - SOURCE_PATH_NOT_FOUND = 'SourcePathNotFound' - DESTINATION_PATH_IS_BEING_DELETED = 'DestinationPathIsBeingDeleted' - FILE_SYSTEM_ALREADY_EXISTS = 'FilesystemAlreadyExists' - FILE_SYSTEM_BEING_DELETED = 'FilesystemBeingDeleted' - INVALID_DESTINATION_PATH = 'InvalidDestinationPath' - INVALID_RENAME_SOURCE_PATH = 'InvalidRenameSourcePath' - INVALID_SOURCE_OR_DESTINATION_RESOURCE_TYPE = 'InvalidSourceOrDestinationResourceType' - LEASE_IS_ALREADY_BROKEN = 'LeaseIsAlreadyBroken' - LEASE_NAME_MISMATCH = 'LeaseNameMismatch' - PATH_CONFLICT = 'PathConflict' - SOURCE_PATH_IS_BEING_DELETED = 'SourcePathIsBeingDeleted' - - -class DictMixin(object): - - def __setitem__(self, key, item): - self.__dict__[key] = item - - def __getitem__(self, key): - return self.__dict__[key] - - def __repr__(self): - return str(self) - - def __len__(self): - return len(self.keys()) - - def __delitem__(self, key): - self.__dict__[key] = None - - def __eq__(self, other): - """Compare objects by comparing all attributes.""" - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other): - """Compare objects by comparing all attributes.""" - return not self.__eq__(other) - - def __str__(self): - return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) - - def has_key(self, k): - return k in self.__dict__ - - def update(self, *args, **kwargs): - return self.__dict__.update(*args, **kwargs) - - def keys(self): - return [k for k in self.__dict__ if not k.startswith('_')] - - def values(self): - return [v for k, v in self.__dict__.items() 
if not k.startswith('_')] - - def items(self): - return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] - - def get(self, key, default=None): - if key in self.__dict__: - return self.__dict__[key] - return default - - -class LocationMode(object): - """ - Specifies the location the request should be sent to. This mode only applies - for RA-GRS accounts which allow secondary read access. All other account types - must use PRIMARY. - """ - - PRIMARY = 'primary' #: Requests should be sent to the primary location. - SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. - - -class ResourceTypes(object): - """ - Specifies the resource types that are accessible with the account SAS. - - :param bool service: - Access to service-level APIs (e.g., Get/Set Service Properties, - Get Service Stats, List Containers/Queues/Shares) - :param bool container: - Access to container-level APIs (e.g., Create/Delete Container, - Create/Delete Queue, Create/Delete Share, - List Blobs/Files and Directories) - :param bool object: - Access to object-level APIs for blobs, queue messages, and - files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) - """ - - def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin - self.service = service - self.container = container - self.object = object - self._str = (('s' if self.service else '') + - ('c' if self.container else '') + - ('o' if self.object else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create a ResourceTypes from a string. - - To specify service, container, or object you need only to - include the first letter of the word in the string. E.g. service and container, - you would provide a string "sc". - - :param str string: Specify service, container, or object in - in the string with the first letter of the word. - :return: A ResourceTypes object - :rtype: ~azure.storage.queue.ResourceTypes - """ - res_service = 's' in string - res_container = 'c' in string - res_object = 'o' in string - - parsed = cls(res_service, res_container, res_object) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class AccountSasPermissions(object): - """ - :class:`~ResourceTypes` class to be used with generate_account_sas - function and for the AccessPolicies used with set_*_acl. There are two types of - SAS which may be used to grant resource access. One is to grant access to a - specific resource (resource-specific). Another is to grant access to the - entire service for a specific account and allow certain operations based on - perms found here. - - :param bool read: - Valid for all signed resources types (Service, Container, and Object). - Permits read permissions to the specified resource type. - :param bool write: - Valid for all signed resources types (Service, Container, and Object). - Permits write permissions to the specified resource type. - :param bool delete: - Valid for Container and Object resource types, except for queue messages. - :param bool delete_previous_version: - Delete the previous blob version for the versioning enabled storage account. - :param bool list: - Valid for Service and Container resource types only. - :param bool add: - Valid for the following Object resource types only: queue messages, and append blobs. - :param bool create: - Valid for the following Object resource types only: blobs and files. 
- Users can create new blobs or files, but may not overwrite existing - blobs or files. - :param bool update: - Valid for the following Object resource types only: queue messages. - :param bool process: - Valid for the following Object resource type only: queue messages. - :keyword bool tag: - To enable set or get tags on the blobs in the container. - :keyword bool filter_by_tags: - To enable get blobs by tags, this should be used together with list permission. - :keyword bool set_immutability_policy: - To enable operations related to set/delete immutability policy. - To get immutability policy, you just need read permission. - :keyword bool permanent_delete: - To enable permanent delete on the blob is permitted. - Valid for Object resource type of Blob only. - """ - def __init__(self, read=False, write=False, delete=False, - list=False, # pylint: disable=redefined-builtin - add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): - self.read = read - self.write = write - self.delete = delete - self.delete_previous_version = delete_previous_version - self.permanent_delete = kwargs.pop('permanent_delete', False) - self.list = list - self.add = add - self.create = create - self.update = update - self.process = process - self.tag = kwargs.pop('tag', False) - self.filter_by_tags = kwargs.pop('filter_by_tags', False) - self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) - self._str = (('r' if self.read else '') + - ('w' if self.write else '') + - ('d' if self.delete else '') + - ('x' if self.delete_previous_version else '') + - ('y' if self.permanent_delete else '') + - ('l' if self.list else '') + - ('a' if self.add else '') + - ('c' if self.create else '') + - ('u' if self.update else '') + - ('p' if self.process else '') + - ('f' if self.filter_by_tags else '') + - ('t' if self.tag else '') + - ('i' if self.set_immutability_policy else '') - ) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, permission): - """Create AccountSasPermissions from a string. - - To specify read, write, delete, etc. permissions you need only to - include the first letter of the word in the string. E.g. for read and write - permissions you would provide a string "rw". - - :param str permission: Specify permissions in - the string with the first letter of the word. - :return: An AccountSasPermissions object - :rtype: ~azure.storage.queue.AccountSasPermissions - """ - p_read = 'r' in permission - p_write = 'w' in permission - p_delete = 'd' in permission - p_delete_previous_version = 'x' in permission - p_permanent_delete = 'y' in permission - p_list = 'l' in permission - p_add = 'a' in permission - p_create = 'c' in permission - p_update = 'u' in permission - p_process = 'p' in permission - p_tag = 't' in permission - p_filter_by_tags = 'f' in permission - p_set_immutability_policy = 'i' in permission - parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, - list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, - filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy, - permanent_delete=p_permanent_delete) - - return parsed - - -class Services(object): - """Specifies the services accessible with the account SAS. 
- - :param bool blob: - Access for the `~azure.storage.blob.BlobServiceClient` - :param bool queue: - Access for the `~azure.storage.queue.QueueServiceClient` - :param bool fileshare: - Access for the `~azure.storage.fileshare.ShareServiceClient` - """ - - def __init__(self, blob=False, queue=False, fileshare=False): - self.blob = blob - self.queue = queue - self.fileshare = fileshare - self._str = (('b' if self.blob else '') + - ('q' if self.queue else '') + - ('f' if self.fileshare else '')) - - def __str__(self): - return self._str - - @classmethod - def from_string(cls, string): - """Create Services from a string. - - To specify blob, queue, or file you need only to - include the first letter of the word in the string. E.g. for blob and queue - you would provide a string "bq". - - :param str string: Specify blob, queue, or file in - in the string with the first letter of the word. - :return: A Services object - :rtype: ~azure.storage.queue.Services - """ - res_blob = 'b' in string - res_queue = 'q' in string - res_file = 'f' in string - - parsed = cls(res_blob, res_queue, res_file) - parsed._str = string # pylint: disable = protected-access - return parsed - - -class UserDelegationKey(object): - """ - Represents a user delegation key, provided to the user by Azure Storage - based on their Azure Active Directory access token. - - The fields are saved as simple strings since the user does not have to interact with this object; - to generate an identify SAS, the user can simply pass it to the right API. - - :ivar str signed_oid: - Object ID of this token. - :ivar str signed_tid: - Tenant ID of the tenant that issued this token. - :ivar str signed_start: - The datetime this token becomes valid. - :ivar str signed_expiry: - The datetime this token expires. - :ivar str signed_service: - What service this key is valid for. - :ivar str signed_version: - The version identifier of the REST service that created this token. - :ivar str value: - The user delegation key. - """ - def __init__(self): - self.signed_oid = None - self.signed_tid = None - self.signed_start = None - self.signed_expiry = None - self.signed_service = None - self.signed_version = None - self.value = None diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/parser.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/parser.py deleted file mode 100644 index c6feba8..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/parser.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import sys - -if sys.version_info < (3,): - def _str(value): - if isinstance(value, unicode): # pylint: disable=undefined-variable - return value.encode('utf-8') - - return str(value) -else: - _str = str - - -def _to_utc_datetime(value): - return value.strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/policies.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/policies.py deleted file mode 100644 index ea802fe..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/policies.py +++ /dev/null @@ -1,631 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -import base64 -import hashlib -import re -import random -from time import time -from io import SEEK_SET, UnsupportedOperation -import logging -import uuid -from typing import Any, TYPE_CHECKING -from wsgiref.handlers import format_date_time -try: - from urllib.parse import ( - urlparse, - parse_qsl, - urlunparse, - urlencode, - ) -except ImportError: - from urllib import urlencode # type: ignore - from urlparse import ( # type: ignore - urlparse, - parse_qsl, - urlunparse, - ) - -from azure.core.pipeline.policies import ( - HeadersPolicy, - SansIOHTTPPolicy, - NetworkTraceLoggingPolicy, - HTTPPolicy, - RequestHistory -) -from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError - -from .models import LocationMode - -try: - _unicode_type = unicode # type: ignore -except NameError: - _unicode_type = str - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -def encode_base64(data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') - - -def is_exhausted(settings): - """Are we out of retries?""" - retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) - retry_counts = list(filter(None, retry_counts)) - if not retry_counts: - return False - return min(retry_counts) < 0 - - -def retry_hook(settings, **kwargs): - if settings['hook']: - settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) - - -def is_retry(response, mode): # pylint: disable=too-many-return-statements - """Is this method/status code retryable? (Based on allowlists and control - variables such as the number of total retries to allow, whether to - respect the Retry-After header, whether this header is present, and - whether the returned status code is on the list of status codes to - be retried upon on the presence of the aforementioned header) - """ - status = response.http_response.status_code - if 300 <= status < 500: - # An exception occured, but in most cases it was expected. Examples could - # include a 309 Conflict or 412 Precondition Failed. - if status == 404 and mode == LocationMode.SECONDARY: - # Response code 404 should be retried if secondary was used. - return True - if status == 408: - # Response code 408 is a timeout and should be retried. - return True - return False - if status >= 500: - # Response codes above 500 with the exception of 501 Not Implemented and - # 505 Version Not Supported indicate a server issue and should be retried. 
- if status in [501, 505]: - return False - return True - # retry if invalid content md5 - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = response.http_request.headers.get('content-md5', None) or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - return True - return False - - -def urljoin(base_url, stub_url): - parsed = urlparse(base_url) - parsed = parsed._replace(path=parsed.path + '/' + stub_url) - return parsed.geturl() - - -class QueueMessagePolicy(SansIOHTTPPolicy): - - def on_request(self, request): - # Hack to fix generated code adding '/messages' after SAS parameters - includes_messages = request.http_request.url.endswith('/messages') - if includes_messages: - request.http_request.url = request.http_request.url[:-(len('/messages'))] - request.http_request.url = urljoin(request.http_request.url, 'messages') - - message_id = request.context.options.pop('queue_message_id', None) - if message_id: - request.http_request.url = urljoin( - request.http_request.url, - message_id) - - -class StorageHeadersPolicy(HeadersPolicy): - request_id_header_name = 'x-ms-client-request-id' - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - super(StorageHeadersPolicy, self).on_request(request) - current_time = format_date_time(time()) - request.http_request.headers['x-ms-date'] = current_time - - custom_id = request.context.options.pop('client_request_id', None) - request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) - - # def on_response(self, request, response): - # # raise exception if the echoed client request id from the service is not identical to the one we sent - # if self.request_id_header_name in response.http_response.headers: - - # client_request_id = request.http_request.headers.get(self.request_id_header_name) - - # if response.http_response.headers[self.request_id_header_name] != client_request_id: - # raise AzureError( - # "Echoed client request ID: {} does not match sent client request ID: {}. 
" - # "Service request ID: {}".format( - # response.http_response.headers[self.request_id_header_name], client_request_id, - # response.http_response.headers['x-ms-request-id']), - # response=response.http_response - # ) - - -class StorageHosts(SansIOHTTPPolicy): - - def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument - self.hosts = hosts - super(StorageHosts, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - request.context.options['hosts'] = self.hosts - parsed_url = urlparse(request.http_request.url) - - # Detect what location mode we're currently requesting with - location_mode = LocationMode.PRIMARY - for key, value in self.hosts.items(): - if parsed_url.netloc == value: - location_mode = key - - # See if a specific location mode has been specified, and if so, redirect - use_location = request.context.options.pop('use_location', None) - if use_location: - # Lock retries to the specific location - request.context.options['retry_to_secondary'] = False - if use_location not in self.hosts: - raise ValueError("Attempting to use undefined host location {}".format(use_location)) - if use_location != location_mode: - # Update request URL to use the specified location - updated = parsed_url._replace(netloc=self.hosts[use_location]) - request.http_request.url = updated.geturl() - location_mode = use_location - - request.context.options['location_mode'] = location_mode - - -class StorageLoggingPolicy(NetworkTraceLoggingPolicy): - """A policy that logs HTTP request and response to the DEBUG logger. - - This accepts both global configuration, and per-request level with "enable_http_logger" - """ - def __init__(self, logging_enable=False, **kwargs): - self.logging_body = kwargs.pop("logging_body", False) - super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs) - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - http_request = request.http_request - options = request.context.options - self.logging_body = self.logging_body or options.pop("logging_body", False) - if options.pop("logging_enable", self.enable_http_logger): - request.context["logging_enable"] = True - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - log_url = http_request.url - query_params = http_request.query - if 'sig' in query_params: - log_url = log_url.replace(query_params['sig'], "sig=*****") - _LOGGER.debug("Request URL: %r", log_url) - _LOGGER.debug("Request method: %r", http_request.method) - _LOGGER.debug("Request headers:") - for header, value in http_request.headers.items(): - if header.lower() == 'authorization': - value = '*****' - elif header.lower() == 'x-ms-copy-source' and 'sig' in value: - # take the url apart and scrub away the signed signature - scheme, netloc, path, params, query, fragment = urlparse(value) - parsed_qs = dict(parse_qsl(query)) - parsed_qs['sig'] = '*****' - - # the SAS needs to be put back together - value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) - - _LOGGER.debug(" %r: %r", header, value) - _LOGGER.debug("Request body:") - - if self.logging_body: - _LOGGER.debug(str(http_request.body)) - else: - # We don't want to log the binary data of a file upload. 
- _LOGGER.debug("Hidden body, please use logging_body to show body") - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log request: %r", err) - - def on_response(self, request, response): - # type: (PipelineRequest, PipelineResponse, Any) -> None - if response.context.pop("logging_enable", self.enable_http_logger): - if not _LOGGER.isEnabledFor(logging.DEBUG): - return - - try: - _LOGGER.debug("Response status: %r", response.http_response.status_code) - _LOGGER.debug("Response headers:") - for res_header, value in response.http_response.headers.items(): - _LOGGER.debug(" %r: %r", res_header, value) - - # We don't want to log binary data if the response is a file. - _LOGGER.debug("Response content:") - pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) - header = response.http_response.headers.get('content-disposition') - resp_content_type = response.http_response.headers.get("content-type", "") - - if header and pattern.match(header): - filename = header.partition('=')[2] - _LOGGER.debug("File attachments: %s", filename) - elif resp_content_type.endswith("octet-stream"): - _LOGGER.debug("Body contains binary data.") - elif resp_content_type.startswith("image"): - _LOGGER.debug("Body contains image data.") - - if self.logging_body and resp_content_type.startswith("text"): - _LOGGER.debug(response.http_response.text()) - elif self.logging_body: - try: - _LOGGER.debug(response.http_response.body()) - except ValueError: - _LOGGER.debug("Body is streamable") - - except Exception as err: # pylint: disable=broad-except - _LOGGER.debug("Failed to log response: %s", repr(err)) - - -class StorageRequestHook(SansIOHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._request_callback = kwargs.get('raw_request_hook') - super(StorageRequestHook, self).__init__() - - def on_request(self, request): - # type: (PipelineRequest, **Any) -> PipelineResponse - request_callback = request.context.options.pop('raw_request_hook', self._request_callback) - if request_callback: - request_callback(request) - - -class StorageResponseHook(HTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(StorageResponseHook, self).__init__() - - def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = self.next.send(request) - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += 
int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - response_callback(response) - request.context['response_callback'] = response_callback - return response - - -class StorageContentValidation(SansIOHTTPPolicy): - """A simple policy that sends the given headers - with the request. - - This will overwrite any headers already defined in the request. - """ - header_name = 'Content-MD5' - - def __init__(self, **kwargs): # pylint: disable=unused-argument - super(StorageContentValidation, self).__init__() - - @staticmethod - def get_content_md5(data): - # Since HTTP does not differentiate between no content and empty content, - # we have to perform a None check. - data = data or b"" - md5 = hashlib.md5() # nosec - if isinstance(data, bytes): - md5.update(data) - elif hasattr(data, 'read'): - pos = 0 - try: - pos = data.tell() - except: # pylint: disable=bare-except - pass - for chunk in iter(lambda: data.read(4096), b""): - md5.update(chunk) - try: - data.seek(pos, SEEK_SET) - except (AttributeError, IOError): - raise ValueError("Data should be bytes or a seekable file-like object.") - else: - raise ValueError("Data should be bytes or a seekable file-like object.") - - return md5.digest() - - def on_request(self, request): - # type: (PipelineRequest, Any) -> None - validate_content = request.context.options.pop('validate_content', False) - if validate_content and request.http_request.method != 'GET': - computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) - request.http_request.headers[self.header_name] = computed_md5 - request.context['validate_content_md5'] = computed_md5 - request.context['validate_content'] = validate_content - - def on_response(self, request, response): - if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): - computed_md5 = request.context.get('validate_content_md5') or \ - encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) - if response.http_response.headers['content-md5'] != computed_md5: - raise AzureError( - 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( - response.http_response.headers['content-md5'], computed_md5), - response=response.http_response - ) - - -class StorageRetryPolicy(HTTPPolicy): - """ - The base class for Exponential and Linear retries containing shared code. - """ - - def __init__(self, **kwargs): - self.total_retries = kwargs.pop('retry_total', 10) - self.connect_retries = kwargs.pop('retry_connect', 3) - self.read_retries = kwargs.pop('retry_read', 3) - self.status_retries = kwargs.pop('retry_status', 3) - self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) - super(StorageRetryPolicy, self).__init__() - - def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use - """ - A function which sets the next host location on the request, if applicable. - - :param ~azure.storage.models.RetryContext context: - The retry context containing the previous host location and the request - to evaluate and possibly modify. 
- """ - if settings['hosts'] and all(settings['hosts'].values()): - url = urlparse(request.url) - # If there's more than one possible location, retry to the alternative - if settings['mode'] == LocationMode.PRIMARY: - settings['mode'] = LocationMode.SECONDARY - else: - settings['mode'] = LocationMode.PRIMARY - updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) - request.url = updated.geturl() - - def configure_retries(self, request): # pylint: disable=no-self-use - body_position = None - if hasattr(request.http_request.body, 'read'): - try: - body_position = request.http_request.body.tell() - except (AttributeError, UnsupportedOperation): - # if body position cannot be obtained, then retries will not work - pass - options = request.context.options - return { - 'total': options.pop("retry_total", self.total_retries), - 'connect': options.pop("retry_connect", self.connect_retries), - 'read': options.pop("retry_read", self.read_retries), - 'status': options.pop("retry_status", self.status_retries), - 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), - 'mode': options.pop("location_mode", LocationMode.PRIMARY), - 'hosts': options.pop("hosts", None), - 'hook': options.pop("retry_hook", None), - 'body_position': body_position, - 'count': 0, - 'history': [] - } - - def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use - """ Formula for computing the current backoff. - Should be calculated by child class. - - :rtype: float - """ - return 0 - - def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - transport.sleep(backoff) - - def increment(self, settings, request, response=None, error=None): - """Increment the retry counters. - - :param response: A pipeline response object. - :param error: An error encountered during the request, or - None if the response was received successfully. - - :return: Whether the retry attempts are exhausted. - """ - settings['total'] -= 1 - - if error and isinstance(error, ServiceRequestError): - # Errors when we're fairly sure that the server did not receive the - # request, so it should be safe to retry. - settings['connect'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - elif error and isinstance(error, ServiceResponseError): - # Errors that occur after the request has been started, so we should - # assume that the server began processing it. 
- settings['read'] -= 1 - settings['history'].append(RequestHistory(request, error=error)) - - else: - # Incrementing because of a server error like a 500 in - # status_forcelist and a the given method is in the allowlist - if response: - settings['status'] -= 1 - settings['history'].append(RequestHistory(request, http_response=response)) - - if not is_exhausted(settings): - if request.method not in ['PUT'] and settings['retry_secondary']: - self._set_next_host_location(settings, request) - - # rewind the request body if it is a stream - if request.body and hasattr(request.body, 'read'): - # no position was saved, then retry would not work - if settings['body_position'] is None: - return False - try: - # attempt to rewind the body to the initial position - request.body.seek(settings['body_position'], SEEK_SET) - except (UnsupportedOperation, ValueError): - # if body is not seekable, then retry would not work - return False - settings['count'] += 1 - return True - return False - - def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(StorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. 
- - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(StorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/policies_async.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/policies_async.py deleted file mode 100644 index e0926b8..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/policies_async.py +++ /dev/null @@ -1,220 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=invalid-overridden-method - -import asyncio -import random -import logging -from typing import Any, TYPE_CHECKING - -from azure.core.pipeline.policies import AsyncHTTPPolicy -from azure.core.exceptions import AzureError - -from .policies import is_retry, StorageRetryPolicy - -if TYPE_CHECKING: - from azure.core.pipeline import PipelineRequest, PipelineResponse - - -_LOGGER = logging.getLogger(__name__) - - -async def retry_hook(settings, **kwargs): - if settings['hook']: - if asyncio.iscoroutine(settings['hook']): - await settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - else: - settings['hook']( - retry_count=settings['count'] - 1, - location_mode=settings['mode'], - **kwargs) - - -class AsyncStorageResponseHook(AsyncHTTPPolicy): - - def __init__(self, **kwargs): # pylint: disable=unused-argument - self._response_callback = kwargs.get('raw_response_hook') - super(AsyncStorageResponseHook, self).__init__() - - async def send(self, request): - # type: (PipelineRequest) -> PipelineResponse - data_stream_total = request.context.get('data_stream_total') or \ - request.context.options.pop('data_stream_total', None) - download_stream_current = request.context.get('download_stream_current') or \ - request.context.options.pop('download_stream_current', None) - upload_stream_current = request.context.get('upload_stream_current') or \ - request.context.options.pop('upload_stream_current', None) - response_callback = request.context.get('response_callback') or \ - request.context.options.pop('raw_response_hook', self._response_callback) - - response = await self.next.send(request) - await response.http_response.load_body() - - will_retry = is_retry(response, request.context.options.get('mode')) - if not will_retry and download_stream_current is not None: - download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) - if data_stream_total is None: - content_range = response.http_response.headers.get('Content-Range') - if content_range: - data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) - else: - data_stream_total = download_stream_current - elif not will_retry and upload_stream_current is not None: - upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) - for pipeline_obj in [request, response]: - pipeline_obj.context['data_stream_total'] = data_stream_total - pipeline_obj.context['download_stream_current'] = download_stream_current - pipeline_obj.context['upload_stream_current'] = upload_stream_current - if response_callback: - if asyncio.iscoroutine(response_callback): - await response_callback(response) - else: - response_callback(response) - request.context['response_callback'] = response_callback - return response - -class AsyncStorageRetryPolicy(StorageRetryPolicy): - """ - The base class for Exponential and Linear retries containing shared code. 
- """ - - async def sleep(self, settings, transport): - backoff = self.get_backoff_time(settings) - if not backoff or backoff < 0: - return - await transport.sleep(backoff) - - async def send(self, request): - retries_remaining = True - response = None - retry_settings = self.configure_retries(request) - while retries_remaining: - try: - response = await self.next.send(request) - if is_retry(response, retry_settings['mode']): - retries_remaining = self.increment( - retry_settings, - request=request.http_request, - response=response.http_response) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=response.http_response, - error=None) - await self.sleep(retry_settings, request.context.transport) - continue - break - except AzureError as err: - retries_remaining = self.increment( - retry_settings, request=request.http_request, error=err) - if retries_remaining: - await retry_hook( - retry_settings, - request=request.http_request, - response=None, - error=err) - await self.sleep(retry_settings, request.context.transport) - continue - raise err - if retry_settings['history']: - response.context['history'] = retry_settings['history'] - response.http_response.location_mode = retry_settings['mode'] - return response - - -class ExponentialRetry(AsyncStorageRetryPolicy): - """Exponential retry.""" - - def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, - retry_to_secondary=False, random_jitter_range=3, **kwargs): - ''' - Constructs an Exponential retry object. The initial_backoff is used for - the first retry. Subsequent retries are retried after initial_backoff + - increment_power^retry_count seconds. For example, by default the first retry - occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the - third after (15+3^2) = 24 seconds. - - :param int initial_backoff: - The initial backoff interval, in seconds, for the first retry. - :param int increment_base: - The base, in seconds, to increment the initial_backoff by after the - first retry. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - ''' - self.initial_backoff = initial_backoff - self.increment_base = increment_base - self.random_jitter_range = random_jitter_range - super(ExponentialRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. 
- :rtype: int or None - """ - random_generator = random.Random() - backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) - random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 - random_range_end = backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) - - -class LinearRetry(AsyncStorageRetryPolicy): - """Linear retry.""" - - def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): - """ - Constructs a Linear retry object. - - :param int backoff: - The backoff interval, in seconds, between retries. - :param int max_attempts: - The maximum number of retry attempts. - :param bool retry_to_secondary: - Whether the request should be retried to secondary, if able. This should - only be enabled of RA-GRS accounts are used and potentially stale data - can be handled. - :param int random_jitter_range: - A number in seconds which indicates a range to jitter/randomize for the back-off interval. - For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. - """ - self.backoff = backoff - self.random_jitter_range = random_jitter_range - super(LinearRetry, self).__init__( - retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) - - def get_backoff_time(self, settings): - """ - Calculates how long to sleep before retrying. - - :return: - An integer indicating how long to wait before retrying the request, - or None to indicate no retry should be performed. - :rtype: int or None - """ - random_generator = random.Random() - # the backoff interval normally does not change, however there is the possibility - # that it was modified by accessing the property directly after initializing the object - random_range_start = self.backoff - self.random_jitter_range \ - if self.backoff > self.random_jitter_range else 0 - random_range_end = self.backoff + self.random_jitter_range - return random_generator.uniform(random_range_start, random_range_end) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/request_handlers.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/request_handlers.py deleted file mode 100644 index 325825c..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/request_handlers.py +++ /dev/null @@ -1,273 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) - -import logging -from os import fstat -from io import (SEEK_END, SEEK_SET, UnsupportedOperation) - -import isodate - -from azure.core.exceptions import raise_with_traceback - - -_LOGGER = logging.getLogger(__name__) - -_REQUEST_DELIMITER_PREFIX = "batch_" -_HTTP1_1_IDENTIFIER = "HTTP/1.1" -_HTTP_LINE_ENDING = "\r\n" - - -def serialize_iso(attr): - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: ValueError if format invalid. 
- """ - if not attr: - return None - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, - utc.tm_hour, utc.tm_min, utc.tm_sec) - return date + 'Z' - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise_with_traceback(ValueError, msg, err) - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise_with_traceback(TypeError, msg, err) - - -def get_length(data): - length = None - # Check if object implements the __len__ method, covers most input cases such as bytearray. - try: - length = len(data) - except: # pylint: disable=bare-except - pass - - if not length: - # Check if the stream is a file-like stream object. - # If so, calculate the size using the file descriptor. - try: - fileno = data.fileno() - except (AttributeError, UnsupportedOperation): - pass - else: - try: - return fstat(fileno).st_size - except OSError: - # Not a valid fileno, may be possible requests returned - # a socket number? - pass - - # If the stream is seekable and tell() is implemented, calculate the stream size. - try: - current_position = data.tell() - data.seek(0, SEEK_END) - length = data.tell() - current_position - data.seek(current_position, SEEK_SET) - except (AttributeError, OSError, UnsupportedOperation): - pass - - return length - - -def read_length(data): - try: - if hasattr(data, 'read'): - read_data = b'' - for chunk in iter(lambda: data.read(4096), b""): - read_data += chunk - return len(read_data), read_data - if hasattr(data, '__iter__'): - read_data = b'' - for chunk in data: - read_data += chunk - return len(read_data), read_data - except: # pylint: disable=bare-except - pass - raise ValueError("Unable to calculate content length, please specify.") - - -def validate_and_format_range_headers( - start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): - # If end range is provided, start range must be provided - if (start_range_required or end_range is not None) and start_range is None: - raise ValueError("start_range value cannot be None.") - if end_range_required and end_range is None: - raise ValueError("end_range value cannot be None.") - - # Page ranges must be 512 aligned - if align_to_page: - if start_range is not None and start_range % 512 != 0: - raise ValueError("Invalid page blob start_range: {0}. " - "The size must be aligned to a 512-byte boundary.".format(start_range)) - if end_range is not None and end_range % 512 != 511: - raise ValueError("Invalid page blob end_range: {0}. 
" - "The size must be aligned to a 512-byte boundary.".format(end_range)) - - # Format based on whether end_range is present - range_header = None - if end_range is not None: - range_header = 'bytes={0}-{1}'.format(start_range, end_range) - elif start_range is not None: - range_header = "bytes={0}-".format(start_range) - - # Content MD5 can only be provided for a complete range less than 4MB in size - range_validation = None - if check_content_md5: - if start_range is None or end_range is None: - raise ValueError("Both start and end range requied for MD5 content validation.") - if end_range - start_range > 4 * 1024 * 1024: - raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") - range_validation = 'true' - - return range_header, range_validation - - -def add_metadata_headers(metadata=None): - # type: (Optional[Dict[str, str]]) -> Dict[str, str] - headers = {} - if metadata: - for key, value in metadata.items(): - headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value - return headers - - -def serialize_batch_body(requests, batch_id): - """ - -- - - -- - (repeated as needed) - ---- - - Serializes the requests in this batch to a single HTTP mixed/multipart body. - - :param list[~azure.core.pipeline.transport.HttpRequest] requests: - a list of sub-request for the batch request - :param str batch_id: - to be embedded in batch sub-request delimiter - :return: The body bytes for this batch. - """ - - if requests is None or len(requests) == 0: - raise ValueError('Please provide sub-request(s) for this batch request') - - delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') - newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') - batch_body = list() - - content_index = 0 - for request in requests: - request.headers.update({ - "Content-ID": str(content_index), - "Content-Length": str(0) - }) - batch_body.append(delimiter_bytes) - batch_body.append(_make_body_from_sub_request(request)) - batch_body.append(newline_bytes) - content_index += 1 - - batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) - # final line of body MUST have \r\n at the end, or it will not be properly read by the service - batch_body.append(newline_bytes) - - return bytes().join(batch_body) - - -def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): - """ - Gets the delimiter used for this batch request's mixed/multipart HTTP format. - - :param str batch_id: - Randomly generated id - :param bool is_prepend_dashes: - Whether to include the starting dashes. Used in the body, but non on defining the delimiter. - :param bool is_append_dashes: - Whether to include the ending dashes. Used in the body on the closing delimiter only. - :return: The delimiter, WITHOUT a trailing newline. - """ - - prepend_dashes = '--' if is_prepend_dashes else '' - append_dashes = '--' if is_append_dashes else '' - - return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes - - -def _make_body_from_sub_request(sub_request): - """ - Content-Type: application/http - Content-ID: - Content-Transfer-Encoding: (if present) - - HTTP/ -
<header name>: <header value>
(repeated as necessary) - Content-Length: - (newline if content length > 0) - (if content length > 0) - - Serializes an http request. - - :param ~azure.core.pipeline.transport.HttpRequest sub_request: - Request to serialize. - :return: The serialized sub-request in bytes - """ - - # put the sub-request's headers into a list for efficient str concatenation - sub_request_body = list() - - # get headers for ease of manipulation; remove headers as they are used - headers = sub_request.headers - - # append opening headers - sub_request_body.append("Content-Type: application/http") - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-ID: ") - sub_request_body.append(headers.pop("Content-ID", "")) - sub_request_body.append(_HTTP_LINE_ENDING) - - sub_request_body.append("Content-Transfer-Encoding: binary") - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - # append HTTP verb and path and query and HTTP version - sub_request_body.append(sub_request.method) - sub_request_body.append(' ') - sub_request_body.append(sub_request.url) - sub_request_body.append(' ') - sub_request_body.append(_HTTP1_1_IDENTIFIER) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) - for header_name, header_value in headers.items(): - if header_value is not None: - sub_request_body.append(header_name) - sub_request_body.append(": ") - sub_request_body.append(header_value) - sub_request_body.append(_HTTP_LINE_ENDING) - - # append blank line - sub_request_body.append(_HTTP_LINE_ENDING) - - return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/response_handlers.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/response_handlers.py deleted file mode 100644 index ab146e9..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/response_handlers.py +++ /dev/null @@ -1,198 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- -from typing import ( # pylint: disable=unused-import - Union, Optional, Any, Iterable, Dict, List, Type, Tuple, - TYPE_CHECKING -) -import logging -from xml.etree.ElementTree import Element - -from azure.core.pipeline.policies import ContentDecodePolicy -from azure.core.exceptions import ( - HttpResponseError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceExistsError, - ClientAuthenticationError, - DecodeError) - -from .parser import _to_utc_datetime -from .models import StorageErrorCode, UserDelegationKey, get_enum_value - - -if TYPE_CHECKING: - from datetime import datetime - from azure.core.exceptions import AzureError - - -_LOGGER = logging.getLogger(__name__) - - -class PartialBatchErrorException(HttpResponseError): - """There is a partial failure in batch operations. - - :param str message: The message of the exception. - :param response: Server response to be deserialized. - :param list parts: A list of the parts in multipart response. 
- """ - - def __init__(self, message, response, parts): - self.parts = parts - super(PartialBatchErrorException, self).__init__(message=message, response=response) - - -def parse_length_from_content_range(content_range): - ''' - Parses the blob length from the content range header: bytes 1-3/65537 - ''' - if content_range is None: - return None - - # First, split in space and take the second half: '1-3/65537' - # Next, split on slash and take the second half: '65537' - # Finally, convert to an int: 65537 - return int(content_range.split(' ', 1)[1].split('/', 1)[1]) - - -def normalize_headers(headers): - normalized = {} - for key, value in headers.items(): - if key.startswith('x-ms-'): - key = key[5:] - normalized[key.lower().replace('-', '_')] = get_enum_value(value) - return normalized - - -def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} - return {k[10:]: v for k, v in raw_metadata.items()} - - -def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers) - - -def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return normalize_headers(response_headers), deserialized - - -def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.http_response.location_mode, deserialized - - -def process_storage_error(storage_error): # pylint:disable=too-many-statements - raise_error = HttpResponseError - serialized = False - if not storage_error.response or storage_error.response.status_code in [200, 204]: - raise storage_error - # If it is one of those three then it has been serialized prior by the generated layer. 
- if isinstance(storage_error, (PartialBatchErrorException, - ClientAuthenticationError, - ResourceNotFoundError, - ResourceExistsError)): - serialized = True - error_code = storage_error.response.headers.get('x-ms-error-code') - error_message = storage_error.message - additional_data = {} - error_dict = {} - try: - error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) - try: - error_body = error_body or storage_error.response.reason - except AttributeError: - error_body = '' - # If it is an XML response - if isinstance(error_body, Element): - error_dict = { - child.tag.lower(): child.text - for child in error_body - } - # If it is a JSON response - elif isinstance(error_body, dict): - error_dict = error_body.get('error', {}) - elif not error_code: - _LOGGER.warning( - 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) - error_dict = {'message': str(error_body)} - - # If we extracted from a Json or XML response - if error_dict: - error_code = error_dict.get('code') - error_message = error_dict.get('message') - additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} - except DecodeError: - pass - - try: - # This check would be unnecessary if we have already serialized the error - if error_code and not serialized: - error_code = StorageErrorCode(error_code) - if error_code in [StorageErrorCode.condition_not_met, - StorageErrorCode.blob_overwritten]: - raise_error = ResourceModifiedError - if error_code in [StorageErrorCode.invalid_authentication_info, - StorageErrorCode.authentication_failed]: - raise_error = ClientAuthenticationError - if error_code in [StorageErrorCode.resource_not_found, - StorageErrorCode.cannot_verify_copy_source, - StorageErrorCode.blob_not_found, - StorageErrorCode.queue_not_found, - StorageErrorCode.container_not_found, - StorageErrorCode.parent_not_found, - StorageErrorCode.share_not_found]: - raise_error = ResourceNotFoundError - if error_code in [StorageErrorCode.account_already_exists, - StorageErrorCode.account_being_created, - StorageErrorCode.resource_already_exists, - StorageErrorCode.resource_type_mismatch, - StorageErrorCode.blob_already_exists, - StorageErrorCode.queue_already_exists, - StorageErrorCode.container_already_exists, - StorageErrorCode.container_being_deleted, - StorageErrorCode.queue_being_deleted, - StorageErrorCode.share_already_exists, - StorageErrorCode.share_being_deleted]: - raise_error = ResourceExistsError - except ValueError: - # Got an unknown error code - pass - - # Error message should include all the error properties - try: - error_message += "\nErrorCode:{}".format(error_code.value) - except AttributeError: - error_message += "\nErrorCode:{}".format(error_code) - for name, info in additional_data.items(): - error_message += "\n{}:{}".format(name, info) - - # No need to create an instance if it has already been serialized by the generated layer - if serialized: - storage_error.message = error_message - error = storage_error - else: - error = raise_error(message=error_message, response=storage_error.response) - # Ensure these properties are stored in the error instance as well (not just the error message) - error.error_code = error_code - error.additional_info = additional_data - # error.args is what's surfaced on the traceback - show error message in all cases - error.args = (error.message,) - try: - # `from None` prevents us from double printing the exception (suppresses generated layer error context) - 
exec("raise error from None") # pylint: disable=exec-used # nosec - except SyntaxError: - raise error - - -def parse_to_internal_user_delegation_key(service_user_delegation_key): - internal_user_delegation_key = UserDelegationKey() - internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid - internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid - internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) - internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) - internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service - internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version - internal_user_delegation_key.value = service_user_delegation_key.value - return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/shared_access_signature.py deleted file mode 100644 index 8577f30..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/shared_access_signature.py +++ /dev/null @@ -1,222 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -------------------------------------------------------------------------- - -from datetime import date - -from .parser import _str, _to_utc_datetime -from .constants import X_MS_VERSION -from . import sign_string, url_quote - - -class QueryStringConstants(object): - SIGNED_SIGNATURE = 'sig' - SIGNED_PERMISSION = 'sp' - SIGNED_START = 'st' - SIGNED_EXPIRY = 'se' - SIGNED_RESOURCE = 'sr' - SIGNED_IDENTIFIER = 'si' - SIGNED_IP = 'sip' - SIGNED_PROTOCOL = 'spr' - SIGNED_VERSION = 'sv' - SIGNED_CACHE_CONTROL = 'rscc' - SIGNED_CONTENT_DISPOSITION = 'rscd' - SIGNED_CONTENT_ENCODING = 'rsce' - SIGNED_CONTENT_LANGUAGE = 'rscl' - SIGNED_CONTENT_TYPE = 'rsct' - START_PK = 'spk' - START_RK = 'srk' - END_PK = 'epk' - END_RK = 'erk' - SIGNED_RESOURCE_TYPES = 'srt' - SIGNED_SERVICES = 'ss' - SIGNED_OID = 'skoid' - SIGNED_TID = 'sktid' - SIGNED_KEY_START = 'skt' - SIGNED_KEY_EXPIRY = 'ske' - SIGNED_KEY_SERVICE = 'sks' - SIGNED_KEY_VERSION = 'skv' - - # for ADLS - SIGNED_AUTHORIZED_OID = 'saoid' - SIGNED_UNAUTHORIZED_OID = 'suoid' - SIGNED_CORRELATION_ID = 'scid' - SIGNED_DIRECTORY_DEPTH = 'sdd' - - @staticmethod - def to_list(): - return [ - QueryStringConstants.SIGNED_SIGNATURE, - QueryStringConstants.SIGNED_PERMISSION, - QueryStringConstants.SIGNED_START, - QueryStringConstants.SIGNED_EXPIRY, - QueryStringConstants.SIGNED_RESOURCE, - QueryStringConstants.SIGNED_IDENTIFIER, - QueryStringConstants.SIGNED_IP, - QueryStringConstants.SIGNED_PROTOCOL, - QueryStringConstants.SIGNED_VERSION, - QueryStringConstants.SIGNED_CACHE_CONTROL, - QueryStringConstants.SIGNED_CONTENT_DISPOSITION, - QueryStringConstants.SIGNED_CONTENT_ENCODING, - QueryStringConstants.SIGNED_CONTENT_LANGUAGE, - QueryStringConstants.SIGNED_CONTENT_TYPE, - QueryStringConstants.START_PK, - QueryStringConstants.START_RK, - QueryStringConstants.END_PK, - QueryStringConstants.END_RK, - QueryStringConstants.SIGNED_RESOURCE_TYPES, - QueryStringConstants.SIGNED_SERVICES, - QueryStringConstants.SIGNED_OID, - QueryStringConstants.SIGNED_TID, - QueryStringConstants.SIGNED_KEY_START, - 
QueryStringConstants.SIGNED_KEY_EXPIRY, - QueryStringConstants.SIGNED_KEY_SERVICE, - QueryStringConstants.SIGNED_KEY_VERSION, - # for ADLS - QueryStringConstants.SIGNED_AUTHORIZED_OID, - QueryStringConstants.SIGNED_UNAUTHORIZED_OID, - QueryStringConstants.SIGNED_CORRELATION_ID, - QueryStringConstants.SIGNED_DIRECTORY_DEPTH, - ] - - -class SharedAccessSignature(object): - ''' - Provides a factory for creating account access - signature tokens with an account name and account key. Users can either - use the factory or can construct the appropriate service and use the - generate_*_shared_access_signature method directly. - ''' - - def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): - ''' - :param str account_name: - The storage account name used to generate the shared access signatures. - :param str account_key: - The access key to generate the shares access signatures. - :param str x_ms_version: - The service version used to generate the shared access signatures. - ''' - self.account_name = account_name - self.account_key = account_key - self.x_ms_version = x_ms_version - - def generate_account(self, services, resource_types, permission, expiry, start=None, - ip=None, protocol=None): - ''' - Generates a shared access signature for the account. - Use the returned signature with the sas_token parameter of the service - or to create a new account object. - - :param ResourceTypes resource_types: - Specifies the resource types that are accessible with the account - SAS. You can combine values to provide access to more than one - resource type. - :param AccountSasPermissions permission: - The permissions associated with the shared access signature. The - user is restricted to operations allowed by the permissions. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has been - specified in an associated stored access policy. You can combine - values to provide more than one permission. - :param expiry: - The time at which the shared access signature becomes invalid. - Required unless an id is given referencing a stored access policy - which contains this field. This field must be omitted if it has - been specified in an associated stored access policy. Azure will always - convert values to UTC. If a date is passed in without timezone info, it - is assumed to be UTC. - :type expiry: datetime or str - :param start: - The time at which the shared access signature becomes valid. If - omitted, start time for this call is assumed to be the time when the - storage service receives the request. Azure will always convert values - to UTC. If a date is passed in without timezone info, it is assumed to - be UTC. - :type start: datetime or str - :param str ip: - Specifies an IP address or a range of IP addresses from which to accept requests. - If the IP address from which the request originates does not match the IP address - or address range specified on the SAS token, the request is not authenticated. - For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS - restricts the request to those IP addresses. - :param str protocol: - Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
- ''' - sas = _SharedAccessHelper() - sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) - sas.add_account(services, resource_types) - sas.add_account_signature(self.account_name, self.account_key) - - return sas.get_token() - - -class _SharedAccessHelper(object): - def __init__(self): - self.query_dict = {} - - def _add_query(self, name, val): - if val: - self.query_dict[name] = _str(val) if val is not None else None - - def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): - if isinstance(start, date): - start = _to_utc_datetime(start) - - if isinstance(expiry, date): - expiry = _to_utc_datetime(expiry) - - self._add_query(QueryStringConstants.SIGNED_START, start) - self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) - self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) - self._add_query(QueryStringConstants.SIGNED_IP, ip) - self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) - self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) - - def add_resource(self, resource): - self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) - - def add_id(self, policy_id): - self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) - - def add_account(self, services, resource_types): - self._add_query(QueryStringConstants.SIGNED_SERVICES, services) - self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) - - def add_override_response_headers(self, cache_control, - content_disposition, - content_encoding, - content_language, - content_type): - self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) - self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) - self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) - self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) - self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - - def add_account_signature(self, account_name, account_key): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - string_to_sign = \ - (account_name + '\n' + - get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + - get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + - get_value_to_append(QueryStringConstants.SIGNED_START) + - get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + - get_value_to_append(QueryStringConstants.SIGNED_IP) + - get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(QueryStringConstants.SIGNED_VERSION) + - '\n' # Signed Encryption Scope - always empty for queue - ) - - self._add_query(QueryStringConstants.SIGNED_SIGNATURE, - sign_string(account_key, string_to_sign)) - - def get_token(self): - return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/uploads.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/uploads.py deleted file mode 100644 index 279f084..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/uploads.py +++ /dev/null @@ -1,607 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -from concurrent import futures -from io import BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation -from itertools import islice -from math import ceil -from threading import Lock - -import six -from azure.core.tracing.common import with_current_context - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers - - -_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 -_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." - - -def _parallel_uploads(executor, uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(executor.submit(with_current_context(uploader), next_chunk)) - except StopIteration: - break - - # Wait for the remaining uploads to finish - done, _running = futures.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - validate_content=None, - progress_hook=None, - **kwargs): - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - validate_content=validate_content, - progress_hook=progress_hook, - **kwargs) - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - progress_hook=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - progress_hook=progress_hook, - **kwargs) - - if parallel: - with futures.ThreadPoolExecutor(max_concurrency) as executor: - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [uploader.process_substream_block(b) for b in 
uploader.get_substream_blocks()] - if any(range_ids): - return sorted(range_ids) - return [] - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__( - self, service, - total_size, - chunk_size, - stream, - parallel, - encryptor=None, - padder=None, - progress_hook=None, - **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_lock = Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - self.progress_hook = progress_hook - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - def get_chunk_streams(self): - index = 0 - while True: - data = b"" - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError("Blob data should be of type bytes.") - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. - if temp == b"" or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - def _update_progress(self, length): - if self.progress_lock is not None: - with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - if self.progress_hook: - self.progress_hook(self.progress_total, self.total_size) - - def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = self._upload_chunk(chunk_offset, chunk_data) - self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - def process_substream_block(self, block_data): - return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - def 
_upload_substream_block_with_progress(self, index, block_stream): - range_id = self._upload_substream_block(index, block_stream) - self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop("modified_access_conditions", None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. - index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - self.service.stage_block( - block_id, - len(chunk_data), - chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return index, block_id - - def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - return not any(bytearray(chunk_data)) - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - self.current_length = int(self.response_headers["blob_append_offset"]) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - def _upload_substream_block(self, index, block_stream): - pass - - -class 
DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - self.response_headers = self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - def _upload_substream_block(self, index, block_stream): - try: - self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response - - # TODO: Implement this method. - def _upload_substream_block(self, index, block_stream): - pass - - -class SubStream(IOBase): - - def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): - # Python 2.7: file-like objects created with open() typically support seek(), but are not - # derivations of io.IOBase and thus do not implement seekable(). - # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
- try: - # only the main thread runs this, so there's no need grabbing the lock - wrapped_stream.seek(0, SEEK_CUR) - except: - raise ValueError("Wrapped stream must support seek().") - - self._lock = lockObj - self._wrapped_stream = wrapped_stream - self._position = 0 - self._stream_begin_index = stream_begin_index - self._length = length - self._buffer = BytesIO() - - # we must avoid buffering more than necessary, and also not use up too much memory - # so the max buffer size is capped at 4MB - self._max_buffer_size = ( - length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE - ) - self._current_buffer_start = 0 - self._current_buffer_size = 0 - super(SubStream, self).__init__() - - def __len__(self): - return self._length - - def close(self): - if self._buffer: - self._buffer.close() - self._wrapped_stream = None - IOBase.close(self) - - def fileno(self): - return self._wrapped_stream.fileno() - - def flush(self): - pass - - def read(self, size=None): - if self.closed: # pylint: disable=using-constant-test - raise ValueError("Stream is closed.") - - if size is None: - size = self._length - self._position - - # adjust if out of bounds - if size + self._position >= self._length: - size = self._length - self._position - - # return fast - if size == 0 or self._buffer.closed: - return b"" - - # attempt first read from the read buffer and update position - read_buffer = self._buffer.read(size) - bytes_read = len(read_buffer) - bytes_remaining = size - bytes_read - self._position += bytes_read - - # repopulate the read buffer from the underlying stream to fulfill the request - # ensure the seek and read operations are done atomically (only if a lock is provided) - if bytes_remaining > 0: - with self._buffer: - # either read in the max buffer size specified on the class - # or read in just enough data for the current block/sub stream - current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) - - # lock is only defined if max_concurrency > 1 (parallel uploads) - if self._lock: - with self._lock: - # reposition the underlying stream to match the start of the data to read - absolute_position = self._stream_begin_index + self._position - self._wrapped_stream.seek(absolute_position, SEEK_SET) - # If we can't seek to the right location, our read will be corrupted so fail fast. - if self._wrapped_stream.tell() != absolute_position: - raise IOError("Stream failed to seek to the desired location.") - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - else: - absolute_position = self._stream_begin_index + self._position - # It's possible that there's connection problem during data transfer, - # so when we retry we don't want to read from current position of wrapped stream, - # instead we should seek to where we want to read from. 
- if self._wrapped_stream.tell() != absolute_position: - self._wrapped_stream.seek(absolute_position, SEEK_SET) - - buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) - - if buffer_from_stream: - # update the buffer with new data from the wrapped stream - # we need to note down the start position and size of the buffer, in case seek is performed later - self._buffer = BytesIO(buffer_from_stream) - self._current_buffer_start = self._position - self._current_buffer_size = len(buffer_from_stream) - - # read the remaining bytes from the new buffer and update position - second_read_buffer = self._buffer.read(bytes_remaining) - read_buffer += second_read_buffer - self._position += len(second_read_buffer) - - return read_buffer - - def readable(self): - return True - - def readinto(self, b): - raise UnsupportedOperation - - def seek(self, offset, whence=0): - if whence is SEEK_SET: - start_index = 0 - elif whence is SEEK_CUR: - start_index = self._position - elif whence is SEEK_END: - start_index = self._length - offset = -offset - else: - raise ValueError("Invalid argument for the 'whence' parameter.") - - pos = start_index + offset - - if pos > self._length: - pos = self._length - elif pos < 0: - pos = 0 - - # check if buffer is still valid - # if not, drop buffer - if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: - self._buffer.close() - self._buffer = BytesIO() - else: # if yes seek to correct position - delta = pos - self._current_buffer_start - self._buffer.seek(delta, SEEK_SET) - - self._position = pos - return pos - - def seekable(self): - return True - - def tell(self): - return self._position - - def write(self): - raise UnsupportedOperation - - def writelines(self): - raise UnsupportedOperation - - def writeable(self): - return False - - -class IterStreamer(object): - """ - File-like streaming iterator. - """ - - def __init__(self, generator, encoding="UTF-8"): - self.generator = generator - self.iterator = iter(generator) - self.leftover = b"" - self.encoding = encoding - - def __len__(self): - return self.generator.__len__() - - def __iter__(self): - return self.iterator - - def seekable(self): - return False - - def __next__(self): - return next(self.iterator) - - next = __next__ # Python 2 compatibility. - - def tell(self, *args, **kwargs): - raise UnsupportedOperation("Data generator does not support tell.") - - def seek(self, *args, **kwargs): - raise UnsupportedOperation("Data generator is unseekable.") - - def read(self, size): - data = self.leftover - count = len(self.leftover) - try: - while count < size: - chunk = self.__next__() - if isinstance(chunk, six.text_type): - chunk = chunk.encode(self.encoding) - data += chunk - count += len(chunk) - except StopIteration: - pass - - if count > size: - self.leftover = data[size:] - - return data[:size] diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/uploads_async.py b/azure/multiapi/storagev2/queue/v2021_02_12/_shared/uploads_async.py deleted file mode 100644 index 465270b..0000000 --- a/azure/multiapi/storagev2/queue/v2021_02_12/_shared/uploads_async.py +++ /dev/null @@ -1,423 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- -# pylint: disable=no-self-use - -import asyncio -import threading -from asyncio import Lock -from itertools import islice -from math import ceil - -import six - -from . import encode_base64, url_quote -from .request_handlers import get_length -from .response_handlers import return_response_headers -from .uploads import SubStream, IterStreamer # pylint: disable=unused-import - - -async def _async_parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = await pending.__anext__() - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopAsyncIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def _parallel_uploads(uploader, pending, running): - range_ids = [] - while True: - # Wait for some download to finish before adding a new one - done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) - range_ids.extend([chunk.result() for chunk in done]) - try: - for _ in range(0, len(done)): - next_chunk = next(pending) - running.add(asyncio.ensure_future(uploader(next_chunk))) - except StopIteration: - break - - # Wait for the remaining uploads to finish - if running: - done, _running = await asyncio.wait(running) - range_ids.extend([chunk.result() for chunk in done]) - return range_ids - - -async def upload_data_chunks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - progress_hook=None, - **kwargs): - - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - progress_hook=progress_hook, - **kwargs) - - if parallel: - upload_tasks = uploader.get_chunk_streams() - running_futures = [] - for _ in range(max_concurrency): - try: - chunk = await upload_tasks.__anext__() - running_futures.append(asyncio.ensure_future(uploader.process_chunk(chunk))) - except StopAsyncIteration: - break - - range_ids = await _async_parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) - else: - range_ids = [] - async for chunk in uploader.get_chunk_streams(): - range_ids.append(await uploader.process_chunk(chunk)) - - if any(range_ids): - return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] - return uploader.response_headers - - -async def upload_substream_blocks( - service=None, - uploader_class=None, - total_size=None, - chunk_size=None, - max_concurrency=None, - stream=None, - progress_hook=None, - **kwargs): - parallel = max_concurrency > 1 - if parallel and 'modified_access_conditions' in kwargs: - # Access conditions do not work with parallelism - kwargs['modified_access_conditions'] = None - uploader = uploader_class( - service=service, - total_size=total_size, - chunk_size=chunk_size, - stream=stream, - parallel=parallel, - progress_hook=progress_hook, - **kwargs) - - if parallel: - upload_tasks = 
uploader.get_substream_blocks() - running_futures = [ - asyncio.ensure_future(uploader.process_substream_block(u)) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) - else: - range_ids = [] - for block in uploader.get_substream_blocks(): - range_ids.append(await uploader.process_substream_block(block)) - if any(range_ids): - return sorted(range_ids) - return - - -class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes - - def __init__( - self, service, - total_size, - chunk_size, - stream, - parallel, - encryptor=None, - padder=None, - progress_hook=None, - **kwargs): - self.service = service - self.total_size = total_size - self.chunk_size = chunk_size - self.stream = stream - self.parallel = parallel - - # Stream management - self.stream_lock = threading.Lock() if parallel else None - - # Progress feedback - self.progress_total = 0 - self.progress_lock = Lock() if parallel else None - self.progress_hook = progress_hook - - # Encryption - self.encryptor = encryptor - self.padder = padder - self.response_headers = None - self.etag = None - self.last_modified = None - self.request_options = kwargs - - async def get_chunk_streams(self): - index = 0 - while True: - data = b'' - read_size = self.chunk_size - - # Buffer until we either reach the end of the stream or get a whole chunk. - while True: - if self.total_size: - read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) - if asyncio.iscoroutinefunction(self.stream.read): - temp = await self.stream.read(read_size) - else: - temp = self.stream.read(read_size) - if not isinstance(temp, six.binary_type): - raise TypeError('Blob data should be of type bytes.') - data += temp or b"" - - # We have read an empty string and so are at the end - # of the buffer or we have read a full chunk. 
- if temp == b'' or len(data) == self.chunk_size: - break - - if len(data) == self.chunk_size: - if self.padder: - data = self.padder.update(data) - if self.encryptor: - data = self.encryptor.update(data) - yield index, data - else: - if self.padder: - data = self.padder.update(data) + self.padder.finalize() - if self.encryptor: - data = self.encryptor.update(data) + self.encryptor.finalize() - if data: - yield index, data - break - index += len(data) - - async def process_chunk(self, chunk_data): - chunk_bytes = chunk_data[1] - chunk_offset = chunk_data[0] - return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) - - async def _update_progress(self, length): - if self.progress_lock is not None: - async with self.progress_lock: - self.progress_total += length - else: - self.progress_total += length - - if self.progress_hook: - await self.progress_hook(self.progress_total, self.total_size) - - async def _upload_chunk(self, chunk_offset, chunk_data): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): - range_id = await self._upload_chunk(chunk_offset, chunk_data) - await self._update_progress(len(chunk_data)) - return range_id - - def get_substream_blocks(self): - assert self.chunk_size is not None - lock = self.stream_lock - blob_length = self.total_size - - if blob_length is None: - blob_length = get_length(self.stream) - if blob_length is None: - raise ValueError("Unable to determine content length of upload data.") - - blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) - last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size - - for i in range(blocks): - index = i * self.chunk_size - length = last_block_size if i == blocks - 1 else self.chunk_size - yield index, SubStream(self.stream, index, length, lock) - - async def process_substream_block(self, block_data): - return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - - async def _upload_substream_block(self, index, block_stream): - raise NotImplementedError("Must be implemented by child class.") - - async def _upload_substream_block_with_progress(self, index, block_stream): - range_id = await self._upload_substream_block(index, block_stream) - await self._update_progress(len(block_stream)) - return range_id - - def set_response_properties(self, resp): - self.etag = resp.etag - self.last_modified = resp.last_modified - - -class BlockBlobChunkUploader(_ChunkUploader): - - def __init__(self, *args, **kwargs): - kwargs.pop('modified_access_conditions', None) - super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - # TODO: This is incorrect, but works with recording. 
- index = '{0:032d}'.format(chunk_offset) - block_id = encode_base64(url_quote(encode_base64(index))) - await self.service.stage_block( - block_id, - len(chunk_data), - body=chunk_data, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - return index, block_id - - async def _upload_substream_block(self, index, block_stream): - try: - block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) - await self.service.stage_block( - block_id, - len(block_stream), - block_stream, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - finally: - block_stream.close() - return block_id - - -class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def _is_chunk_empty(self, chunk_data): - # read until non-zero byte is encountered - # if reached the end without returning, then chunk_data is all 0's - for each_byte in chunk_data: - if each_byte not in [0, b'\x00']: - return False - return True - - async def _upload_chunk(self, chunk_offset, chunk_data): - # avoid uploading the empty pages - if not self._is_chunk_empty(chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 - content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - computed_md5 = None - self.response_headers = await self.service.upload_pages( - body=chunk_data, - content_length=len(chunk_data), - transactional_content_md5=computed_md5, - range=content_range, - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - pass - - -class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - def __init__(self, *args, **kwargs): - super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) - self.current_length = None - - async def _upload_chunk(self, chunk_offset, chunk_data): - if self.current_length is None: - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - self.current_length = int(self.response_headers['blob_append_offset']) - else: - self.request_options['append_position_access_conditions'].append_position = \ - self.current_length + chunk_offset - self.response_headers = await self.service.append_block( - body=chunk_data, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options) - - async def _upload_substream_block(self, index, block_stream): - pass - - -class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - self.response_headers = await self.service.append_data( - body=chunk_data, - position=chunk_offset, - content_length=len(chunk_data), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - - if not self.parallel and self.request_options.get('modified_access_conditions'): - self.request_options['modified_access_conditions'].if_match = 
self.response_headers['etag'] - - async def _upload_substream_block(self, index, block_stream): - try: - await self.service.append_data( - body=block_stream, - position=index, - content_length=len(block_stream), - cls=return_response_headers, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - finally: - block_stream.close() - - -class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method - - async def _upload_chunk(self, chunk_offset, chunk_data): - length = len(chunk_data) - chunk_end = chunk_offset + length - 1 - response = await self.service.upload_range( - chunk_data, - chunk_offset, - length, - data_stream_total=self.total_size, - upload_stream_current=self.progress_total, - **self.request_options - ) - range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) - return range_id, response - - # TODO: Implement this method. - async def _upload_substream_block(self, index, block_stream): - pass diff --git a/azure/multiapi/storagev2/queue/v2021_02_12/py.typed b/azure/multiapi/storagev2/queue/v2021_02_12/py.typed deleted file mode 100644 index e69de29..0000000 diff --git a/setup.py b/setup.py index 57f4489..cd4d79a 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ setup( name='azure-multiapi-storage', - version='0.10.0', + version='1.0.0', description='Microsoft Azure Storage Client Library for Python with multi API version support.', long_description=open('README.rst', 'r').read(), license='MIT',
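
Note: the removed uploads_async.py above drives chunked uploads with a bounded window of in-flight tasks — it seeds up to max_concurrency futures, waits with asyncio.wait(..., return_when=asyncio.FIRST_COMPLETED), starts one new upload per completed one, and finally orders the collected range ids by offset. Below is a minimal, self-contained sketch of that pattern only; fake_upload, bounded_parallel_uploads, and the sample data are hypothetical illustrations and are not part of azure-multiapi-storage.

    import asyncio
    from itertools import islice


    async def fake_upload(chunk):
        # Hypothetical uploader: pretend to send one (offset, data) chunk to the service.
        offset, data = chunk
        await asyncio.sleep(0.01)  # stand-in for the actual service call
        return offset, len(data)


    async def bounded_parallel_uploads(uploader, chunks, max_concurrency):
        # Keep at most max_concurrency uploads in flight; as each finishes,
        # start another, then collect and order the results by offset.
        pending = iter(chunks)
        running = {asyncio.ensure_future(uploader(c)) for c in islice(pending, max_concurrency)}
        results = []
        while running:
            done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
            results.extend(task.result() for task in done)
            for c in islice(pending, len(done)):  # top the window back up
                running.add(asyncio.ensure_future(uploader(c)))
        return sorted(results)


    if __name__ == "__main__":
        data = [(i * 4, b"demo") for i in range(10)]
        print(asyncio.run(bounded_parallel_uploads(fake_upload, data, max_concurrency=3)))

Sorting the collected results by offset at the end mirrors how the removed upload_data_chunks helper sorts range ids before returning them, so callers see ranges in stream order regardless of which upload finished first.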